hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
b1fefe8b457e7bb2658bf81a1c229fe2f05cf7c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaSplitBadElement.h"
#include "CudaInsertPoint.h"
#include "CudaMesh.h"
#include <time.h>
// This function iteratively splits encroached segments, encroached subfaces, and bad tets
void splitBadElements(
RealD& t_pointlist,
TriHandleD& t_point2trilist,
TetHandleD& t_point2tetlist,
PointTypeD& t_pointtypelist,
RealD& t_pointradius,
IntD& t_seglist,
TriHandleD& t_seg2trilist,
TetHandleD& t_seg2tetlist,
IntD& t_seg2parentidxlist,
IntD& t_segparentendpointidxlist,
TriStatusD& t_segstatus,
IntD& t_trifacelist,
TetHandleD& t_tri2tetlist,
TriHandleD& t_tri2trilist,
TriHandleD& t_tri2seglist,
IntD& t_tri2parentidxlist,
IntD& t_triid2parentoffsetlist,
IntD& t_triparentendpointidxlist,
TriStatusD& t_tristatus,
IntD& t_tetlist,
TetHandleD& t_neighborlist,
TriHandleD& t_tet2trilist,
TriHandleD& t_tet2seglist,
TetStatusD& t_tetstatus,
IntD& t_segencmarker,
IntD& t_subfaceencmarker,
int& numofpoints,
int& numofsubseg,
int& numofsubface,
int& numoftet,
MESHBH* behavior,
int debug_msg,
bool debug_error,
bool debug_timing
)
{
IntD t_badeleidlist;
IntD t_encseglist, t_encsubfacelist, t_badtetlist;
IntD t_threadmarker;
TetHandleD t_recordoldtetlist;
IntD t_recordoldtetidx;
int numberofbadeles;
int numberofencsegs, numberofencsubfaces, numberofbadtets;
clock_t tv[2];
int npt[2];
int code = 1;
int iteration = 0;
int counter;
while (true)
{
//printf("%d, ", iteration);
// Update the active bad elements list.
// Exclude the empty ones (their encroachment markers have already been set to -1).
numberofencsegs = updateActiveListByMarker_Slot(t_segencmarker, t_encseglist, numofsubseg);
numberofencsubfaces = updateActiveListByMarker_Slot(t_subfaceencmarker, t_encsubfacelist, numofsubface);
numberofbadtets = updateActiveListByStatus_Slot(t_tetstatus, t_badtetlist, numoftet);
if (numberofbadtets == 0)
break;
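// Stop refining early once only a few bad tets remain and the minimum number of iterations has been reached; code = 0 records that the loop stopped before all bad tets were eliminated.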
if (numberofbadtets <= behavior->minbadtets && iteration >= behavior->miniter)
{
code = 0;
break;
}
numberofbadeles = numberofencsegs + numberofencsubfaces + numberofbadtets;
if (debug_msg) printf(" Iteration #%d: number of bad elements = %d (#%d segs, #%d subfaces, #%d tets)\n",
iteration, numberofbadeles, numberofencsegs, numberofencsubfaces, numberofbadtets);
t_badeleidlist.resize(numberofbadeles);
thrust::copy_n(t_encseglist.begin(), numberofencsegs, t_badeleidlist.begin());
thrust::copy_n(t_encsubfacelist.begin(), numberofencsubfaces, t_badeleidlist.begin() + numberofencsegs);
thrust::copy_n(t_badtetlist.begin(), numberofbadtets, t_badeleidlist.begin() + numberofencsegs + numberofencsubfaces);
t_threadmarker.resize(numberofbadeles);
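// t_threadmarker tags each bad element with its type, in the same order used to fill t_badeleidlist above: 0 = encroached segment, 1 = encroached subface, 2 = bad tet.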
thrust::fill_n(t_threadmarker.begin(), numberofencsegs, 0);
thrust::fill_n(t_threadmarker.begin() + numberofencsegs, numberofencsubfaces, 1);
thrust::fill_n(t_threadmarker.begin() + numberofencsegs + numberofencsubfaces, numberofbadtets, 2);
//tv[0] = clock();
//npt[0] = numofpoints;
// Insert points concurrently
code =
insertPoint_New(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
t_badeleidlist,
t_threadmarker,
t_recordoldtetlist,
t_recordoldtetidx,
numberofbadeles,
numberofencsegs,
numberofencsubfaces,
numberofbadtets,
numofpoints,
numofsubseg,
numofsubface,
numoftet,
behavior,
-1,
-1,
iteration,
debug_msg,
debug_error,
debug_timing
);
if (!code)
break;
hipDeviceSynchronize();
//if (iteration == 3)
// break;
iteration++;
//tv[1] = clock();
//npt[1] = numofpoints;
//printf("%f, %d\n", (REAL)(tv[1] - tv[0]), npt[1] - npt[0]);
}
if (!code && debug_msg)
printf(" End with %d bad tets\n", numberofbadtets);
} | b1fefe8b457e7bb2658bf81a1c229fe2f05cf7c2.cu | #include "CudaSplitBadElement.h"
#include "CudaInsertPoint.h"
#include "CudaMesh.h"
#include <time.h>
// This function iteratively splits encroached segments, encroached subfaces, and bad tets
void splitBadElements(
RealD& t_pointlist,
TriHandleD& t_point2trilist,
TetHandleD& t_point2tetlist,
PointTypeD& t_pointtypelist,
RealD& t_pointradius,
IntD& t_seglist,
TriHandleD& t_seg2trilist,
TetHandleD& t_seg2tetlist,
IntD& t_seg2parentidxlist,
IntD& t_segparentendpointidxlist,
TriStatusD& t_segstatus,
IntD& t_trifacelist,
TetHandleD& t_tri2tetlist,
TriHandleD& t_tri2trilist,
TriHandleD& t_tri2seglist,
IntD& t_tri2parentidxlist,
IntD& t_triid2parentoffsetlist,
IntD& t_triparentendpointidxlist,
TriStatusD& t_tristatus,
IntD& t_tetlist,
TetHandleD& t_neighborlist,
TriHandleD& t_tet2trilist,
TriHandleD& t_tet2seglist,
TetStatusD& t_tetstatus,
IntD& t_segencmarker,
IntD& t_subfaceencmarker,
int& numofpoints,
int& numofsubseg,
int& numofsubface,
int& numoftet,
MESHBH* behavior,
int debug_msg,
bool debug_error,
bool debug_timing
)
{
IntD t_badeleidlist;
IntD t_encseglist, t_encsubfacelist, t_badtetlist;
IntD t_threadmarker;
TetHandleD t_recordoldtetlist;
IntD t_recordoldtetidx;
int numberofbadeles;
int numberofencsegs, numberofencsubfaces, numberofbadtets;
clock_t tv[2];
int npt[2];
int code = 1;
int iteration = 0;
int counter;
while (true)
{
//printf("%d, ", iteration);
// Update the active bad elements list.
// Exclude the empty ones (their encroachment markers have already been set to -1).
numberofencsegs = updateActiveListByMarker_Slot(t_segencmarker, t_encseglist, numofsubseg);
numberofencsubfaces = updateActiveListByMarker_Slot(t_subfaceencmarker, t_encsubfacelist, numofsubface);
numberofbadtets = updateActiveListByStatus_Slot(t_tetstatus, t_badtetlist, numoftet);
if (numberofbadtets == 0)
break;
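// Stop refining early once only a few bad tets remain and the minimum number of iterations has been reached; code = 0 records that the loop stopped before all bad tets were eliminated.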
if (numberofbadtets <= behavior->minbadtets && iteration >= behavior->miniter)
{
code = 0;
break;
}
numberofbadeles = numberofencsegs + numberofencsubfaces + numberofbadtets;
if (debug_msg) printf(" Iteration #%d: number of bad elements = %d (#%d segs, #%d subfaces, #%d tets)\n",
iteration, numberofbadeles, numberofencsegs, numberofencsubfaces, numberofbadtets);
t_badeleidlist.resize(numberofbadeles);
thrust::copy_n(t_encseglist.begin(), numberofencsegs, t_badeleidlist.begin());
thrust::copy_n(t_encsubfacelist.begin(), numberofencsubfaces, t_badeleidlist.begin() + numberofencsegs);
thrust::copy_n(t_badtetlist.begin(), numberofbadtets, t_badeleidlist.begin() + numberofencsegs + numberofencsubfaces);
t_threadmarker.resize(numberofbadeles);
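// t_threadmarker tags each bad element with its type, in the same order used to fill t_badeleidlist above: 0 = encroached segment, 1 = encroached subface, 2 = bad tet.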
thrust::fill_n(t_threadmarker.begin(), numberofencsegs, 0);
thrust::fill_n(t_threadmarker.begin() + numberofencsegs, numberofencsubfaces, 1);
thrust::fill_n(t_threadmarker.begin() + numberofencsegs + numberofencsubfaces, numberofbadtets, 2);
//tv[0] = clock();
//npt[0] = numofpoints;
// Insert points concurrently
code =
insertPoint_New(
t_pointlist,
t_point2trilist,
t_point2tetlist,
t_pointtypelist,
t_pointradius,
t_seglist,
t_seg2trilist,
t_seg2tetlist,
t_seg2parentidxlist,
t_segparentendpointidxlist,
t_segstatus,
t_trifacelist,
t_tri2tetlist,
t_tri2trilist,
t_tri2seglist,
t_tri2parentidxlist,
t_triid2parentoffsetlist,
t_triparentendpointidxlist,
t_tristatus,
t_tetlist,
t_neighborlist,
t_tet2trilist,
t_tet2seglist,
t_tetstatus,
t_segencmarker,
t_subfaceencmarker,
t_badeleidlist,
t_threadmarker,
t_recordoldtetlist,
t_recordoldtetidx,
numberofbadeles,
numberofencsegs,
numberofencsubfaces,
numberofbadtets,
numofpoints,
numofsubseg,
numofsubface,
numoftet,
behavior,
-1,
-1,
iteration,
debug_msg,
debug_error,
debug_timing
);
if (!code)
break;
cudaDeviceSynchronize();
//if (iteration == 3)
// break;
iteration++;
//tv[1] = clock();
//npt[1] = numofpoints;
//printf("%f, %d\n", (REAL)(tv[1] - tv[0]), npt[1] - npt[0]);
}
if (!code && debug_msg)
printf(" End with %d bad tets\n", numberofbadtets);
} |
80df9f236fa254812220b9c022f4f04d6e83dfc8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <assert.h>
#include <cstdio>
#include <time.h>
#include <algorithm>
using namespace std;
////// Vector Operations //////
// Vector Addition
template<typename T>
__global__ void cudaAddVec(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]+=b[id];
}
void deviceError(string msg)
{
cout << msg << endl;
hipDeviceReset();
}
template<typename T>
vector<T> vectorAdd(vector<T> a, vector<T> b)
{
assert(a.size() == b.size());
int n = a.size();
T *h_a = &a[0];
T *h_b = &b[0];
T *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMalloc(&d_b, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMemcpy(d_a,h_a,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(hipMemcpy(d_b,h_b,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
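// Launch a.size()/256 + 1 blocks of 256 threads (at least one thread per element); threads with id >= n simply do nothing in the kernel.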
hipLaunchKernelGGL(( cudaAddVec), dim3(a.size()/256 + 1), dim3(256), 0, 0, d_a, d_b,n);
if(hipMemcpy(h_a, d_a, sizeof(T) * n, hipMemcpyDeviceToHost)!=hipSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
hipDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> operator+(vector<T> const &a, vector<T> const &b)
{
return vectorAdd(a,b);
}
template <typename T>
vector<T>& operator+=(vector<T>& a, const vector<T>& b)
{
a = a + b;
return a;
}
// Vector Subtraction
template<typename T>
__global__ void cudaSabVec(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]-=b[id];
}
template<typename T>
vector<T> vectorSab(vector<T> a, vector<T> b)
{
assert(a.size() == b.size());
int n = a.size();
T *h_a = &a[0];
T *h_b = &b[0];
T *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMalloc(&d_b, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMemcpy(d_a,h_a,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(hipMemcpy(d_b,h_b,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
hipLaunchKernelGGL(( cudaSabVec), dim3(a.size()/256 + 1), dim3(256), 0, 0, d_a, d_b,n);
if(hipMemcpy(h_a, d_a, sizeof(T) * n, hipMemcpyDeviceToHost)!=hipSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
hipDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> operator-(vector<T> const &a, vector<T> const &b)
{
return vectorSab(a,b);
}
template <typename T>
vector<T>& operator-=(vector<T>& a, const vector<T>& b)
{
a = a - b;
return a;
}
// Vector Multiplication
template<typename T>
__global__ void cudaMultVec(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]*=b[id];
}
template<typename T>
__global__ void cudaMultVecScalr(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]*=b[0];
}
template<typename T>
vector<T> vectorMult(vector<T> a, vector<T> b)
{
assert(a.size() == b.size());
int n = a.size();
T *h_a = &a[0];
T *h_b = &b[0];
T *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMalloc(&d_b, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMemcpy(d_a,h_a,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(hipMemcpy(d_b,h_b,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
hipLaunchKernelGGL(( cudaMultVec), dim3(a.size()/256 + 1), dim3(256), 0, 0, d_a, d_b,n);
if(hipMemcpy(h_a, d_a, sizeof(T) * n, hipMemcpyDeviceToHost)!=hipSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
hipDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> vectorMultScalr(vector<T> a, T b)
{
int n = a.size();
T *h_a = &a[0];
T *h_b = &b;
T *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMalloc(&d_b, sizeof(T) * n)!=hipSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(hipMemcpy(d_a,h_a,sizeof(T) * n, hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(hipMemcpy(d_b,h_b,sizeof(T), hipMemcpyHostToDevice)!=hipSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();}; // copy a single scalar, not n elements
hipLaunchKernelGGL(( cudaMultVecScalr), dim3(a.size()/256 + 1), dim3(256), 0, 0, d_a, d_b,n);
if(hipMemcpy(h_a, d_a, sizeof(T) * n, hipMemcpyDeviceToHost)!=hipSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
hipDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> operator*(vector<T> const &a, vector<T> const &b)
{
return vectorMult(a,b);
}
template<typename T>
vector<T> operator*(vector<T> const &a, T const &b)
{
return vectorMultScalr(a,b);
}
template <typename T>
vector<T>& operator*=(vector<T>& a, const vector<T>& b)
{
a = a * b;
return a;
}
template <typename T>
vector<T>& operator*=(vector<T>& a, const T& b)
{
a = a * b;
return a;
}
// Vector Element Summation
template<typename T>
T sumVec(vector<T> const &a)
{
T sum = 0;
for(auto e : a)sum+=e;
return sum;
}
// Vector Inner Product
template<typename T>
T dotProduct(vector<T> a, vector<T> b)
{
vector<T> temp = a * b;
return sumVec(temp);
}
// Vector Print
template<typename T>
void vectorPrint(vector<T> const &a)
{
for(auto e : a)cout << e << " ";
cout << endl;
}
template<typename T>
void operator~(vector<T> const &a)
{
vectorPrint(a);
}
////// Matrix Operations //////
// Transpose
template<typename T>
vector< vector<T> > transpose(vector< vector<T> > const &a)
{
vector<vector<T>> temp;
if (a.size() > 1)
{
for (int i = 0; i < a.size(); i++)
{
temp.push_back(vector<T>{});
for (int j = 0; j < a[i].size(); j++)
{
temp.back().push_back(a[j][i]);
}
}
}
return temp;
}
// Matrix Addition
template<typename T>
__global__ void cudaMatrixAdd(int n, T* a, T* b, T* c)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
c[row*n + col] = a[row*n + col] + b[row*n + col];
}
template<typename T>
vector< vector<T> > matrixAdd(vector< vector<T> > &va, vector< vector<T> > &vb)
{
assert(va.size() == vb.size() && va[0].size() == vb[0].size());
int n = va.size();
vector<vector<T>> temp(n, vector<T>(n));
T* a = new T[n * n];
T* b = new T[n * n];
T* c = new T[n * n];
T *d_a, *d_b, *d_c;
dim3 dimGrid(n,n, 1);
int ts = va.size()/256 + 1;
hipMalloc(&d_a, n * n * sizeof(T));
hipMalloc(&d_b, n * n * sizeof(T));
hipMalloc(&d_c, n * n * sizeof(T));
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
b[n * i + j] = vb[i][j];
hipMemcpy(d_a, a, n * n * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n * n * sizeof(T), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaMatrixAdd), dim3(dimGrid), dim3(ts), 0, 0, n, d_a, d_b, d_c);
hipMemcpy(c, d_c, n * n * sizeof(T), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
temp[i][j] = c[n * i + j];
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return temp;
}
// Matrix Subtraction
template<typename T>
__global__ void cudaMatrixSab(int n, T* a, T* b, T* c)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
c[row*n + col] = a[row*n + col] - b[row*n + col];
}
template<typename T>
vector< vector<T> > matrixSab(vector< vector<T> > &va, vector< vector<T> > &vb)
{
assert(va.size() == vb.size() && va[0].size() == vb[0].size());
int n = va.size();
vector<vector<T>> temp(n, vector<T>(n));
T* a = new T[n * n];
T* b = new T[n * n];
T* c = new T[n * n];
T *d_a, *d_b, *d_c;
dim3 dimGrid(n,n, 1);
int ts = va.size()/256 + 1;
hipMalloc(&d_a, n * n * sizeof(T));
hipMalloc(&d_b, n * n * sizeof(T));
hipMalloc(&d_c, n * n * sizeof(T));
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
b[n * i + j] = vb[i][j];
hipMemcpy(d_a, a, n * n * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n * n * sizeof(T), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaMatrixSab), dim3(dimGrid), dim3(ts), 0, 0, n, d_a, d_b, d_c);
hipMemcpy(c, d_c, n * n * sizeof(T), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
temp[i][j] = c[n * i + j];
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return temp;
}
// Matrix Multiplication
template<typename T>
__global__ void cudaMatrixMult(int m, int n, int k, T* a, T* b, T* c)
{
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < m && col < k)
{
T tmp = (T)0;
for (int i = 0; i < n; i++)
tmp += a[row * n + i] * b[col + i * k];
c[row * k + col] = tmp;
}
}
template<typename T>
vector< vector<T> > matrixMult(vector< vector<T> > const& va, vector< vector<T> > const& vb)
{
int m = va.size();
int n = va[0].size();
int k = vb[0].size();
vector<vector<T>> temp(m, vector<T>(k));
T* a = new T[m * n];
T* b = new T[n * k];
T* c = new T[m * k];
T *d_a, *d_b, *d_c;
dim3 dimGrid(k,m, 1);
int ts = va.size()/256 + 1;
hipMalloc(&d_a, m * n * sizeof(T));
hipMalloc(&d_b, n * k * sizeof(T));
hipMalloc(&d_c, m * k * sizeof(T));
for (int i = 0; i < m; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
for (int i = 0; i < n; i++)
for (int j = 0; j < k; j++)
b[k * i + j] = vb[i][j];
hipMemcpy(d_a, a, m * n * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n * k * sizeof(T), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaMatrixMult), dim3(dimGrid), dim3(ts), 0, 0, m, n, k, d_a, d_b, d_c);
hipMemcpy(c, d_c, m * k * sizeof(T), hipMemcpyDeviceToHost);
for (int i = 0; i < m; i++)
for (int j = 0; j < k; j++)
temp[i][j] = c[k * i + j];
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return temp;
}
// Matrix Scalar Mult
template<typename T>
__global__ void cudaMatrixScalarMult(int n, T *a, T *b, T *c)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
c[row*n + col] = a[row*n + col] * b[0];
}
template<typename T>
vector< vector<T> > matrixScalarMult(vector< vector<T> > va, T vb)
{
int n = va.size();
vector<vector<T>> temp(n, vector<T>(n));
T* a = new T[n * n];
T* b = &vb;
T* c = new T[n * n];
T *d_a, *d_b, *d_c;
dim3 dimGrid(n,n, 1);
int ts = va.size()/256 + 1;
hipMalloc(&d_a, n * n * sizeof(T));
hipMalloc(&d_b, sizeof(T));
hipMalloc(&d_c, n * n * sizeof(T));
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
hipMemcpy(d_a, a, n * n * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(T), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaMatrixScalarMult), dim3(dimGrid), dim3(ts), 0, 0, n, d_a, d_b, d_c);
hipMemcpy(c, d_c, n * n * sizeof(T), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
temp[i][j] = c[n * i + j];
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return temp;
}
// Matrix Determinant
template<typename T>
vector< vector<T> > shave(vector< vector<T> > a, int i)
{
a.erase(a.begin() + 0);
for (int j = 0; j < a.size(); j++)
{
a[j].erase(begin(a[j]) + i);
}
return a;
}
template<typename T>
vector< vector<T> > shave(vector< vector<T> > a, int i, int j)
{
a.erase(a.begin() + j);
for (int j = 0; j < a.size(); j++)
{
a[j].erase(begin(a[j]) + i);
}
return a;
}
template<typename T>
float det2x2(vector< vector<T> > const &a)
{
return a[0][0] * a[1][1] - a[0][1] * a[1][0];
}
template<typename T>
float detNxN(vector< vector<T> > const &a);
template<typename T>
float det(vector< vector<T> > const &a)
{
assert(a.size() == a[0].size());
if (a[0].size() == 2 && a.size() == 2)return det2x2(a);
else return detNxN(a);
}
template<typename T>
float detNxN(vector< vector<T> > const &a)
{
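// Laplace (cofactor) expansion along the first row: det(A) = sum_i (-1)^i * A[0][i] * det(minor(0, i)), recursing until the 2x2 base case.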
float sum = 0;
for (int i = 0; i < a.size(); i++)
{
if ((i + 1) % 2 == 0)
{
sum += a[0][i] * det(shave(a, i)) * (-1);
}
else
{
sum += a[0][i] * det(shave(a, i)) * (1);
}
}
return sum;
}
// Cofactor Matrix
template<typename T>
vector< vector<T> > cof(vector< vector<T> > const &a)
{
vector< vector<T> > cofactors;
for (int i = 0; i < a[0].size(); i++)
{
cofactors.push_back(vector<T>{});
for (int j = 0; j < a.size(); j++)
{
int g = ((i + 1 + j) % 2 == 0) ? -1 : 1;
cofactors.back().push_back(det(shave(a, i, j)) * g);
}
}
cofactors = transpose(cofactors);
return cofactors;
}
// Matrix Inverse
template<typename T>
vector< vector<T> > inv(vector< vector<T> > a)
{
if (a[0].size() >= 3)
{
float detr = det(a);
vector< vector<T> > inv = cof(a);
inv = transpose(inv);
inv = matrixScalarMult(inv,1/detr);
return inv;
}
else
{
vector< vector<T> > inv({ { a[1][1],a[0][1] * -1 },{ a[1][0] * -1, a[0][0] } });
inv = matrixScalarMult(inv, (1/det(a)));
return inv;
}
}
// Printing Matrices
template<typename T>
void printMatrix(vector< vector<T> > const &c)
{
for(int i = 0; i < c.size(); i++)
{
for(int j = 0; j < c[i].size(); j++)
{
cout << c[i][j] << "\t";
}
cout << endl;
}
}
int main()
{
////// Testing //////
vector< vector<float> > a;
vector< vector<float> > b;
for(int i = 1; i <= 4; i++){
a.push_back(vector<float>());
b.push_back(vector<float>());
for(int j = 1; j <= 4; j++){
a.back().push_back(rand()%10);
b.back().push_back(rand()%10);
}
}
vector< vector<float> > test = {{5,7,8},{0,5,4},{6,7,9}};
vector< vector<float> > iTest = inv(test);
printMatrix(iTest);
cout << endl;
cout << "Gaze upon the power of the GPU!" << endl;
vector < vector<float> > ss{ { 8,3 },{ 1,2 } };
vector < vector<float> > result{ {46},{9} };
float detr = det(ss);
ss = inv(ss);
ss = matrixMult(ss,result);
printMatrix(ss);
cout << endl;
vector< vector<float> > c = matrixAdd(a,b);
printMatrix(a);cout << '\n';
printMatrix(b);cout << '\n';
printMatrix(c);
////// (cuda)Success!! ////// \(^o^)/
return 0;
}
| 80df9f236fa254812220b9c022f4f04d6e83dfc8.cu | #include <cuda.h>
#include <iostream>
#include <vector>
#include <assert.h>
#include <cstdio>
#include <time.h>
#include <algorithm>
using namespace std;
////// Vector Operations //////
// Vector Addition
template<typename T>
__global__ void cudaAddVec(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]+=b[id];
}
void deviceError(string msg)
{
cout << msg << endl;
cudaDeviceReset();
}
template<typename T>
vector<T> vectorAdd(vector<T> a, vector<T> b)
{
assert(a.size() == b.size());
int n = a.size();
T *h_a = &a[0];
T *h_b = &b[0];
T *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMalloc(&d_b, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMemcpy(d_a,h_a,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(cudaMemcpy(d_b,h_b,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
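// Launch a.size()/256 + 1 blocks of 256 threads (at least one thread per element); threads with id >= n simply do nothing in the kernel.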
cudaAddVec<<<a.size()/256 + 1, 256>>>(d_a, d_b,n);
if(cudaMemcpy(h_a, d_a, sizeof(T) * n, cudaMemcpyDeviceToHost)!=cudaSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
cudaDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> operator+(vector<T> const &a, vector<T> const &b)
{
return vectorAdd(a,b);
}
template <typename T>
vector<T>& operator+=(vector<T>& a, const vector<T>& b)
{
a = a + b;
return a;
}
// Vector Subtraction
template<typename T>
__global__ void cudaSabVec(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]-=b[id];
}
template<typename T>
vector<T> vectorSab(vector<T> a, vector<T> b)
{
assert(a.size() == b.size());
int n = a.size();
T *h_a = &a[0];
T *h_b = &b[0];
T *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMalloc(&d_b, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMemcpy(d_a,h_a,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(cudaMemcpy(d_b,h_b,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
cudaSabVec<<<a.size()/256 + 1, 256>>>(d_a, d_b,n);
if(cudaMemcpy(h_a, d_a, sizeof(T) * n, cudaMemcpyDeviceToHost)!=cudaSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
cudaDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> operator-(vector<T> const &a, vector<T> const &b)
{
return vectorSab(a,b);
}
template <typename T>
vector<T>& operator-=(vector<T>& a, const vector<T>& b)
{
a = a - b;
return a;
}
// Vector Multiplication
template<typename T>
__global__ void cudaMultVec(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]*=b[id];
}
template<typename T>
__global__ void cudaMultVecScalr(T *a, T *b, int n)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id<n)a[id]*=b[0];
}
template<typename T>
vector<T> vectorMult(vector<T> a, vector<T> b)
{
assert(a.size() == b.size());
int n = a.size();
T *h_a = &a[0];
T *h_b = &b[0];
T *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMalloc(&d_b, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMemcpy(d_a,h_a,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(cudaMemcpy(d_b,h_b,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
cudaMultVec<<<a.size()/256 + 1, 256>>>(d_a, d_b,n);
if(cudaMemcpy(h_a, d_a, sizeof(T) * n, cudaMemcpyDeviceToHost)!=cudaSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
cudaDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> vectorMultScalr(vector<T> a, T b)
{
int n = a.size();
T *h_a = &a[0];
T *h_b = &b;
T *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMalloc(&d_b, sizeof(T) * n)!=cudaSuccess){deviceError("Error Allocating Memory To Device"); return vector<T>();};
if(cudaMemcpy(d_a,h_a,sizeof(T) * n, cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();};
if(cudaMemcpy(d_b,h_b,sizeof(T), cudaMemcpyHostToDevice)!=cudaSuccess){deviceError("Error Copying Variables To Device"); return vector<T>();}; // copy a single scalar, not n elements
cudaMultVecScalr<<<a.size()/256 + 1, 256>>>(d_a, d_b,n);
if(cudaMemcpy(h_a, d_a, sizeof(T) * n, cudaMemcpyDeviceToHost)!=cudaSuccess){deviceError("Error Copying Variables From Device Back To Host"); return vector<T>();};
cudaDeviceReset();
return vector<T>(h_a, h_a+n);
}
template<typename T>
vector<T> operator*(vector<T> const &a, vector<T> const &b)
{
return vectorMult(a,b);
}
template<typename T>
vector<T> operator*(vector<T> const &a, T const &b)
{
return vectorMultScalr(a,b);
}
template <typename T>
vector<T>& operator*=(vector<T>& a, const vector<T>& b)
{
a = a * b;
return a;
}
template <typename T>
vector<T>& operator*=(vector<T>& a, const T& b)
{
a = a * b;
return a;
}
// Vector Element Summation
template<typename T>
T sumVec(vector<T> const &a)
{
T sum = 0;
for(auto e : a)sum+=e;
return sum;
}
// Vector Inner Product
template<typename T>
T dotProduct(vector<T> a, vector<T> b)
{
vector<T> temp = a * b;
return sumVec(temp);
}
// Vector Print
template<typename T>
void vectorPrint(vector<T> const &a)
{
for(auto e : a)cout << e << " ";
cout << endl;
}
template<typename T>
void operator~(vector<T> const &a)
{
vectorPrint(a);
}
////// Matrix Operations //////
// Transpose
template<typename T>
vector< vector<T> > transpose(vector< vector<T> > const &a)
{
vector<vector<T>> temp;
if (a.size() > 1)
{
for (int i = 0; i < a.size(); i++)
{
temp.push_back(vector<T>{});
for (int j = 0; j < a[i].size(); j++)
{
temp.back().push_back(a[j][i]);
}
}
}
return temp;
}
// Matrix Addition
template<typename T>
__global__ void cudaMatrixAdd(int n, T* a, T* b, T* c)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
c[row*n + col] = a[row*n + col] + b[row*n + col];
}
template<typename T>
vector< vector<T> > matrixAdd(vector< vector<T> > &va, vector< vector<T> > &vb)
{
assert(va.size() == vb.size() && va[0].size() == vb[0].size());
int n = va.size();
vector<vector<T>> temp(n, vector<T>(n));
T* a = new T[n * n];
T* b = new T[n * n];
T* c = new T[n * n];
T *d_a, *d_b, *d_c;
dim3 dimGrid(n,n, 1);
int ts = va.size()/256 + 1;
cudaMalloc(&d_a, n * n * sizeof(T));
cudaMalloc(&d_b, n * n * sizeof(T));
cudaMalloc(&d_c, n * n * sizeof(T));
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
b[n * i + j] = vb[i][j];
cudaMemcpy(d_a, a, n * n * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n * n * sizeof(T), cudaMemcpyHostToDevice);
cudaMatrixAdd<<<dimGrid, ts>>>(n, d_a, d_b, d_c);
cudaMemcpy(c, d_c, n * n * sizeof(T), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
temp[i][j] = c[n * i + j];
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return temp;
}
// Matrix Subtraction
template<typename T>
__global__ void cudaMatrixSab(int n, T* a, T* b, T* c)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
c[row*n + col] = a[row*n + col] - b[row*n + col];
}
template<typename T>
vector< vector<T> > matrixSab(vector< vector<T> > &va, vector< vector<T> > &vb)
{
assert(va.size() == vb.size() && va[0].size() == vb[0].size());
int n = va.size();
vector<vector<T>> temp(n, vector<T>(n));
T* a = new T[n * n];
T* b = new T[n * n];
T* c = new T[n * n];
T *d_a, *d_b, *d_c;
dim3 dimGrid(n,n, 1);
int ts = va.size()/256 + 1;
cudaMalloc(&d_a, n * n * sizeof(T));
cudaMalloc(&d_b, n * n * sizeof(T));
cudaMalloc(&d_c, n * n * sizeof(T));
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
b[n * i + j] = vb[i][j];
cudaMemcpy(d_a, a, n * n * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n * n * sizeof(T), cudaMemcpyHostToDevice);
cudaMatrixSab<<<dimGrid, ts>>>(n, d_a, d_b, d_c);
cudaMemcpy(c, d_c, n * n * sizeof(T), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
temp[i][j] = c[n * i + j];
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return temp;
}
// Matrix Multiplication
template<typename T>
__global__ void cudaMatrixMult(int m, int n, int k, T* a, T* b, T* c)
{
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if (row < m && col < k)
{
T tmp = (T)0;
for (int i = 0; i < n; i++)
tmp += a[row * n + i] * b[col + i * k];
c[row * k + col] = tmp;
}
}
template<typename T>
vector< vector<T> > matrixMult(vector< vector<T> > const& va, vector< vector<T> > const& vb)
{
int m = va.size();
int n = va[0].size();
int k = vb[0].size();
vector<vector<T>> temp(m, vector<T>(k));
T* a = new T[m * n];
T* b = new T[n * k];
T* c = new T[m * k];
T *d_a, *d_b, *d_c;
dim3 dimGrid(k,m, 1);
int ts = va.size()/256 + 1;
cudaMalloc(&d_a, m * n * sizeof(T));
cudaMalloc(&d_b, n * k * sizeof(T));
cudaMalloc(&d_c, m * k * sizeof(T));
for (int i = 0; i < m; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
for (int i = 0; i < n; i++)
for (int j = 0; j < k; j++)
b[k * i + j] = vb[i][j];
cudaMemcpy(d_a, a, m * n * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n * k * sizeof(T), cudaMemcpyHostToDevice);
cudaMatrixMult<<<dimGrid, ts>>>(m, n, k, d_a, d_b, d_c);
cudaMemcpy(c, d_c, m * k * sizeof(T), cudaMemcpyDeviceToHost);
for (int i = 0; i < m; i++)
for (int j = 0; j < k; j++)
temp[i][j] = c[k * i + j];
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return temp;
}
// Matrix Scalar Mult
template<typename T>
__global__ void cudaMatrixScalarMult(int n, T *a, T *b, T *c)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
c[row*n + col] = a[row*n + col] * b[0];
}
template<typename T>
vector< vector<T> > matrixScalarMult(vector< vector<T> > va, T vb)
{
int n = va.size();
vector<vector<T>> temp(n, vector<T>(n));
T* a = new T[n * n];
T* b = &vb;
T* c = new T[n * n];
T *d_a, *d_b, *d_c;
dim3 dimGrid(n,n, 1);
int ts = va.size()/256 + 1;
cudaMalloc(&d_a, n * n * sizeof(T));
cudaMalloc(&d_b, sizeof(T));
cudaMalloc(&d_c, n * n * sizeof(T));
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
a[n * i + j] = va[i][j];
cudaMemcpy(d_a, a, n * n * sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(T), cudaMemcpyHostToDevice);
cudaMatrixScalarMult<<<dimGrid, ts>>>(n, d_a, d_b, d_c);
cudaMemcpy(c, d_c, n * n * sizeof(T), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
temp[i][j] = c[n * i + j];
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return temp;
}
// Matrix Determinant
template<typename T>
vector< vector<T> > shave(vector< vector<T> > a, int i)
{
a.erase(a.begin() + 0);
for (int j = 0; j < a.size(); j++)
{
a[j].erase(begin(a[j]) + i);
}
return a;
}
template<typename T>
vector< vector<T> > shave(vector< vector<T> > a, int i, int j)
{
a.erase(a.begin() + j);
for (int j = 0; j < a.size(); j++)
{
a[j].erase(begin(a[j]) + i);
}
return a;
}
template<typename T>
float det2x2(vector< vector<T> > const &a)
{
return a[0][0] * a[1][1] - a[0][1] * a[1][0];
}
template<typename T>
float detNxN(vector< vector<T> > const &a);
template<typename T>
float det(vector< vector<T> > const &a)
{
assert(a.size() == a[0].size());
if (a[0].size() == 2 && a.size() == 2)return det2x2(a);
else return detNxN(a);
}
template<typename T>
float detNxN(vector< vector<T> > const &a)
{
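// Laplace (cofactor) expansion along the first row: det(A) = sum_i (-1)^i * A[0][i] * det(minor(0, i)), recursing until the 2x2 base case.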
float sum = 0;
for (int i = 0; i < a.size(); i++)
{
if ((i + 1) % 2 == 0)
{
sum += a[0][i] * det(shave(a, i)) * (-1);
}
else
{
sum += a[0][i] * det(shave(a, i)) * (1);
}
}
return sum;
}
// Cofactor Matrix
template<typename T>
vector< vector<T> > cof(vector< vector<T> > const &a)
{
vector< vector<T> > cofactors;
for (int i = 0; i < a[0].size(); i++)
{
cofactors.push_back(vector<T>{});
for (int j = 0; j < a.size(); j++)
{
int g = ((i + 1 + j) % 2 == 0) ? -1 : 1;
cofactors.back().push_back(det(shave(a, i, j)) * g);
}
}
cofactors = transpose(cofactors);
return cofactors;
}
// Matrix Inverse
template<typename T>
vector< vector<T> > inv(vector< vector<T> > a)
{
if (a[0].size() >= 3)
{
float detr = det(a);
vector< vector<T> > inv = cof(a);
inv = transpose(inv);
inv = matrixScalarMult(inv,1/detr);
return inv;
}
else
{
vector< vector<T> > inv({ { a[1][1],a[0][1] * -1 },{ a[1][0] * -1, a[0][0] } });
inv = matrixScalarMult(inv, (1/det(a)));
return inv;
}
}
// Printing Matrices
template<typename T>
void printMatrix(vector< vector<T> > const &c)
{
for(int i = 0; i < c.size(); i++)
{
for(int j = 0; j < c[i].size(); j++)
{
cout << c[i][j] << "\t";
}
cout << endl;
}
}
int main()
{
////// Testing //////
vector< vector<float> > a;
vector< vector<float> > b;
for(int i = 1; i <= 4; i++){
a.push_back(vector<float>());
b.push_back(vector<float>());
for(int j = 1; j <= 4; j++){
a.back().push_back(rand()%10);
b.back().push_back(rand()%10);
}
}
vector< vector<float> > test = {{5,7,8},{0,5,4},{6,7,9}};
vector< vector<float> > iTest = inv(test);
printMatrix(iTest);
cout << endl;
cout << "Gaze upon the power of the GPU!" << endl;
vector < vector<float> > ss{ { 8,3 },{ 1,2 } };
vector < vector<float> > result{ {46},{9} };
float detr = det(ss);
ss = inv(ss);
ss = matrixMult(ss,result);
printMatrix(ss);
cout << endl;
vector< vector<float> > c = matrixAdd(a,b);
printMatrix(a);cout << '\n';
printMatrix(b);cout << '\n';
printMatrix(c);
////// (cuda)Success!! ////// \(^o^)/
return 0;
}
|
9fdb64b36dc41c10aa7a8589fa4698996995bb51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "rocblas.h"
#include "gen_relative_pos_bias.h"
#include "reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/activation_kernels.h"
#include "src/fastertransformer/utils/cuda_utils.h"
#include <cstdio>
namespace fastertransformer {
/******************* invokeGenRelativePosBias ***********************/
// relative_position_bias_table is [(2*window_size-1)*(2*window_size-1), headNum]
// relative_position_bias is [head_num, window_size^2, window_size^2]
// grid(window_size*window_size, head_num)
// block(window_size*window_size)
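// Each thread writes one element of relative_position_bias: it looks up the table row through relative_position_bias_index and copies the bias value for its head.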
template<typename T, typename Tindex>
__global__ void gen_relative_pos_bias(T* relative_position_bias,
const T* relative_position_bias_table,
const Tindex* relative_position_bias_index,
const int window_size,
const int head_num)
{
const int h_in_window = blockIdx.x / window_size;
const int w_in_window = blockIdx.x % window_size;
const int h_in_token = threadIdx.x / window_size;
const int w_in_token = threadIdx.x % window_size;
const int head_idx = blockIdx.y;
const int elements_per_window = window_size * window_size;
const size_t elements_per_window_2 = elements_per_window * elements_per_window;
const size_t output_idx = head_idx * elements_per_window_2 + blockIdx.x * elements_per_window + threadIdx.x;
if (output_idx < head_num * elements_per_window_2) {
const Tindex idx_in_table =
relative_position_bias_index[(h_in_window * window_size + w_in_window) * elements_per_window
+ h_in_token * window_size + w_in_token];
relative_position_bias[output_idx] = relative_position_bias_table[idx_in_table * head_num + head_idx];
}
}
template<typename T, typename Tindex>
void invokeGenRelativePosBias(T* relative_position_bias,
const T* relative_position_bias_table,
const Tindex* relative_position_bias_index,
const int window_size,
const int head_num,
hipStream_t stream)
{
dim3 grid(window_size * window_size, head_num);
dim3 block(window_size * window_size);
if (block.x > 1024) {
printf("[ERROR][invokeGenRelativePosBias] window_size*window_size > 1024.\n");
exit(-1);
}
hipLaunchKernelGGL(( gen_relative_pos_bias), dim3(grid), dim3(block), 0, stream,
relative_position_bias, relative_position_bias_table, relative_position_bias_index, window_size, head_num);
}
/******************* invokeGenRelativePosBiasV2 ***********************/
template<typename T, typename Tindex>
void invokeGenRelativePosBiasV2(T* relative_position_bias,
const T* relative_coords_table,
const Tindex* relative_position_bias_index,
const T* cpb_mlp_weight1,
const T* cpb_mlp_bias1,
const T* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
hipStream_t stream)
{
dim3 grid(window_size * window_size, head_num);
dim3 block(window_size * window_size);
if (block.x > 1024) {
printf("[ERROR][invokeGenRelativePosBias] window_size*window_size > 1024.\n");
exit(-1);
}
T* relative_position_bias_table;
check_cuda_error(hipMalloc(&relative_position_bias_table,
((2 * window_size - 1) * (2 * window_size - 1) * head_num) * sizeof(T)));
T* cpb_mlp_1;
check_cuda_error(
hipMalloc(&cpb_mlp_1, ((2 * window_size - 1) * (2 * window_size - 1) * cpb_mlp_out_dim) * sizeof(T)));
hipblasHandle_t cublas_handle;
check_cuda_error(hipblasCreate(&cublas_handle));
int m = (2 * window_size - 1) * (2 * window_size - 1);
T alpha = (T)1.0f;
T beta = (T)0.0f;
hipDataType type = std::is_same<float, T>::value ? HIP_R_32F : HIP_R_16F;
#if (CUDART_VERSION >= 11000)
hipblasComputeType_t compute_type = std::is_same<float, T>::value ? CUBLAS_COMPUTE_32F : CUBLAS_COMPUTE_16F;
#else
hipDataType compute_type = std::is_same<float, T>::value ? HIP_R_32F : HIP_R_16F;
#endif
hipblasGemmAlgo_t algo = std::is_same<float, T>::value ? HIPBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP;
check_cuda_error(hipblasGemmEx(cublas_handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
cpb_mlp_out_dim,
m,
cpb_mlp_in_dim,
&alpha,
cpb_mlp_weight1,
type,
cpb_mlp_in_dim,
relative_coords_table,
type,
cpb_mlp_in_dim,
&beta,
cpb_mlp_1,
type,
cpb_mlp_out_dim,
compute_type,
algo));
invokeGenericActivation<ReluActivation, T, T>(
cpb_mlp_1, cpb_mlp_bias1, nullptr, nullptr, nullptr, nullptr, m, cpb_mlp_out_dim, 0, nullptr, nullptr, stream);
check_cuda_error(hipblasGemmEx(cublas_handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
head_num,
m,
cpb_mlp_out_dim,
&alpha,
cpb_mlp_weight2,
type,
cpb_mlp_out_dim,
cpb_mlp_1,
type,
cpb_mlp_out_dim,
&beta,
relative_position_bias_table,
type,
head_num,
compute_type,
algo));
hipLaunchKernelGGL(( gen_relative_pos_bias), dim3(grid), dim3(block), 0, stream,
relative_position_bias, relative_position_bias_table, relative_position_bias_index, window_size, head_num);
invokeSigmoid(
relative_position_bias, window_size * window_size * window_size * window_size * head_num, 16.0f, stream);
check_cuda_error(hipFree(relative_position_bias_table));
check_cuda_error(hipFree(cpb_mlp_1));
check_cuda_error(hipblasDestroy(cublas_handle));
}
/******************* instantiation ***********************/
template void invokeGenRelativePosBias(float* relative_position_bias,
const float* relative_position_bias_table,
const int* relative_position_bias_index,
const int window_size,
const int head_num,
hipStream_t stream);
template void invokeGenRelativePosBias(half* relative_position_bias,
const half* relative_position_bias_table,
const int* relative_position_bias_index,
const int window_size,
const int head_num,
hipStream_t stream);
template void invokeGenRelativePosBias(float* relative_position_bias,
const float* relative_position_bias_table,
const int64_t* relative_position_bias_index,
const int window_size,
const int head_num,
hipStream_t stream);
template void invokeGenRelativePosBias(half* relative_position_bias,
const half* relative_position_bias_table,
const int64_t* relative_position_bias_index,
const int window_size,
const int head_num,
hipStream_t stream);
__host__ __device__ uint32_t pow2_rounddown(uint32_t x)
{
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x >>= 1;
return x + 1;
}
template<typename T>
__global__ void generate_alibi_slopes(T* alibi_slopes, const size_t num_heads)
{
if (threadIdx.x < num_heads) {
// The largest power of 2 not exceeding num_heads, following HF's implementation.
int num_heads_pow2 = pow2_rounddown(num_heads);
// Loop over the attention head.
for (int h = threadIdx.x; h < num_heads; h += blockDim.x) {
if (h < num_heads_pow2) {
alibi_slopes[h] = static_cast<T>(powf(powf(0.5f, powf(0.5f, log2f(num_heads_pow2) - 3.f)), h + 1));
}
else {
alibi_slopes[h] = static_cast<T>(
powf(powf(0.5f, powf(0.5f, log2f(num_heads_pow2 << 1) - 3.f)), (h - num_heads_pow2) * 2 + 1));
}
}
}
}
template<typename T>
void invokeBuildAlibiSlopes(T* alibi_slopes, const size_t num_heads, hipStream_t stream)
{
// Generate the slopes of the linear attention bias (ALiBi).
//
// Paper: https://arxiv.org/abs/2108.12409
// HF's implementation
// https://github.com/huggingface/transformers/blob/56ef0ba44765162f830873c140bd40bdc975cc34/src/transformers/models/bloom/modeling_bloom.py#L86
// Author's implementation
// https://github.com/ofirpress/attention_with_linear_biases/blob/02aa87e7a29e9340efd28d6d169018eafb3aa57a/fairseq/models/transformer.py#L760
//
// alibi_slopes: [num_heads],
// strictly follows HF's implementation, which treats power-of-2 and non-power-of-2 head counts differently.
// What the paper generates differs from HF's when the number of heads is not a power of 2.
// num_heads: the number of attention heads.
// stream: a cuda stream.
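// For example, with num_heads = 8 this produces slopes 2^-1, 2^-2, ..., 2^-8, one per head.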
dim3 block(min((int)num_heads, 512));
hipLaunchKernelGGL(( generate_alibi_slopes), dim3(1), dim3(block), 0, stream, alibi_slopes, num_heads);
}
template void invokeBuildAlibiSlopes(float* alibi_slopes, const size_t num_heads, hipStream_t stream);
template void invokeBuildAlibiSlopes(half* alibi_slopes, const size_t num_heads, hipStream_t stream);
#ifdef ENABLE_BF16
template void invokeBuildAlibiSlopes(__nv_bfloat16* alibi_slopes, const size_t num_heads, hipStream_t stream);
#endif
template void invokeGenRelativePosBiasV2(float* relative_position_bias,
const float* relative_coords_table,
const int* relative_position_bias_index,
const float* cpb_mlp_weight1,
const float* cpb_mlp_bias1,
const float* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
hipStream_t stream);
template void invokeGenRelativePosBiasV2(half* relative_position_bias,
const half* relative_coords_table,
const int* relative_position_bias_index,
const half* cpb_mlp_weight1,
const half* cpb_mlp_bias1,
const half* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
hipStream_t stream);
template void invokeGenRelativePosBiasV2(float* relative_position_bias,
const float* relative_coords_table,
const int64_t* relative_position_bias_index,
const float* cpb_mlp_weight1,
const float* cpb_mlp_bias1,
const float* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
hipStream_t stream);
template void invokeGenRelativePosBiasV2(half* relative_position_bias,
const half* relative_coords_table,
const int64_t* relative_position_bias_index,
const half* cpb_mlp_weight1,
const half* cpb_mlp_bias1,
const half* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
hipStream_t stream);
} // namespace fastertransformer
| 9fdb64b36dc41c10aa7a8589fa4698996995bb51.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cublas_v2.h"
#include "gen_relative_pos_bias.h"
#include "reduce_kernel_utils.cuh"
#include "src/fastertransformer/kernels/activation_kernels.h"
#include "src/fastertransformer/utils/cuda_utils.h"
#include <cstdio>
namespace fastertransformer {
/******************* invokeGenRelativePosBias ***********************/
// relative_position_bias_table is [(2*window_size-1)*(2*window_size-1), headNum]
// relative_position_bias is [head_num, window_size^2, window_size^2]
// grid(window_size*window_size, head_num)
// block(window_size*window_size)
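// Each thread writes one element of relative_position_bias: it looks up the table row through relative_position_bias_index and copies the bias value for its head.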
template<typename T, typename Tindex>
__global__ void gen_relative_pos_bias(T* relative_position_bias,
const T* relative_position_bias_table,
const Tindex* relative_position_bias_index,
const int window_size,
const int head_num)
{
const int h_in_window = blockIdx.x / window_size;
const int w_in_window = blockIdx.x % window_size;
const int h_in_token = threadIdx.x / window_size;
const int w_in_token = threadIdx.x % window_size;
const int head_idx = blockIdx.y;
const int elements_per_window = window_size * window_size;
const size_t elements_per_window_2 = elements_per_window * elements_per_window;
const size_t output_idx = head_idx * elements_per_window_2 + blockIdx.x * elements_per_window + threadIdx.x;
if (output_idx < head_num * elements_per_window_2) {
const Tindex idx_in_table =
relative_position_bias_index[(h_in_window * window_size + w_in_window) * elements_per_window
+ h_in_token * window_size + w_in_token];
relative_position_bias[output_idx] = relative_position_bias_table[idx_in_table * head_num + head_idx];
}
}
template<typename T, typename Tindex>
void invokeGenRelativePosBias(T* relative_position_bias,
const T* relative_position_bias_table,
const Tindex* relative_position_bias_index,
const int window_size,
const int head_num,
cudaStream_t stream)
{
dim3 grid(window_size * window_size, head_num);
dim3 block(window_size * window_size);
if (block.x > 1024) {
printf("[ERROR][invokeGenRelativePosBias] window_size*window_size > 1024.\n");
exit(-1);
}
gen_relative_pos_bias<<<grid, block, 0, stream>>>(
relative_position_bias, relative_position_bias_table, relative_position_bias_index, window_size, head_num);
}
/******************* invokeGenRelativePosBiasV2 ***********************/
template<typename T, typename Tindex>
void invokeGenRelativePosBiasV2(T* relative_position_bias,
const T* relative_coords_table,
const Tindex* relative_position_bias_index,
const T* cpb_mlp_weight1,
const T* cpb_mlp_bias1,
const T* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
cudaStream_t stream)
{
dim3 grid(window_size * window_size, head_num);
dim3 block(window_size * window_size);
if (block.x > 1024) {
printf("[ERROR][invokeGenRelativePosBias] window_size*window_size > 1024.\n");
exit(-1);
}
T* relative_position_bias_table;
check_cuda_error(cudaMalloc(&relative_position_bias_table,
((2 * window_size - 1) * (2 * window_size - 1) * head_num) * sizeof(T)));
T* cpb_mlp_1;
check_cuda_error(
cudaMalloc(&cpb_mlp_1, ((2 * window_size - 1) * (2 * window_size - 1) * cpb_mlp_out_dim) * sizeof(T)));
cublasHandle_t cublas_handle;
check_cuda_error(cublasCreate(&cublas_handle));
int m = (2 * window_size - 1) * (2 * window_size - 1);
T alpha = (T)1.0f;
T beta = (T)0.0f;
cudaDataType_t type = std::is_same<float, T>::value ? CUDA_R_32F : CUDA_R_16F;
#if (CUDART_VERSION >= 11000)
cublasComputeType_t compute_type = std::is_same<float, T>::value ? CUBLAS_COMPUTE_32F : CUBLAS_COMPUTE_16F;
#else
cudaDataType_t compute_type = std::is_same<float, T>::value ? CUDA_R_32F : CUDA_R_16F;
#endif
cublasGemmAlgo_t algo = std::is_same<float, T>::value ? CUBLAS_GEMM_DEFAULT : CUBLAS_GEMM_DEFAULT_TENSOR_OP;
check_cuda_error(cublasGemmEx(cublas_handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
cpb_mlp_out_dim,
m,
cpb_mlp_in_dim,
&alpha,
cpb_mlp_weight1,
type,
cpb_mlp_in_dim,
relative_coords_table,
type,
cpb_mlp_in_dim,
&beta,
cpb_mlp_1,
type,
cpb_mlp_out_dim,
compute_type,
algo));
invokeGenericActivation<ReluActivation, T, T>(
cpb_mlp_1, cpb_mlp_bias1, nullptr, nullptr, nullptr, nullptr, m, cpb_mlp_out_dim, 0, nullptr, nullptr, stream);
check_cuda_error(cublasGemmEx(cublas_handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
head_num,
m,
cpb_mlp_out_dim,
&alpha,
cpb_mlp_weight2,
type,
cpb_mlp_out_dim,
cpb_mlp_1,
type,
cpb_mlp_out_dim,
&beta,
relative_position_bias_table,
type,
head_num,
compute_type,
algo));
gen_relative_pos_bias<<<grid, block, 0, stream>>>(
relative_position_bias, relative_position_bias_table, relative_position_bias_index, window_size, head_num);
invokeSigmoid(
relative_position_bias, window_size * window_size * window_size * window_size * head_num, 16.0f, stream);
check_cuda_error(cudaFree(relative_position_bias_table));
check_cuda_error(cudaFree(cpb_mlp_1));
check_cuda_error(cublasDestroy(cublas_handle));
}
/******************* instantiation ***********************/
template void invokeGenRelativePosBias(float* relative_position_bias,
const float* relative_position_bias_table,
const int* relative_position_bias_index,
const int window_size,
const int head_num,
cudaStream_t stream);
template void invokeGenRelativePosBias(half* relative_position_bias,
const half* relative_position_bias_table,
const int* relative_position_bias_index,
const int window_size,
const int head_num,
cudaStream_t stream);
template void invokeGenRelativePosBias(float* relative_position_bias,
const float* relative_position_bias_table,
const int64_t* relative_position_bias_index,
const int window_size,
const int head_num,
cudaStream_t stream);
template void invokeGenRelativePosBias(half* relative_position_bias,
const half* relative_position_bias_table,
const int64_t* relative_position_bias_index,
const int window_size,
const int head_num,
cudaStream_t stream);
__host__ __device__ uint32_t pow2_rounddown(uint32_t x)
{
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x >>= 1;
return x + 1;
}
template<typename T>
__global__ void generate_alibi_slopes(T* alibi_slopes, const size_t num_heads)
{
if (threadIdx.x < num_heads) {
// The largest power of 2 not exceeding num_heads, following HF's implementation.
int num_heads_pow2 = pow2_rounddown(num_heads);
// Loop over the attention head.
for (int h = threadIdx.x; h < num_heads; h += blockDim.x) {
if (h < num_heads_pow2) {
alibi_slopes[h] = static_cast<T>(powf(powf(0.5f, powf(0.5f, log2f(num_heads_pow2) - 3.f)), h + 1));
}
else {
alibi_slopes[h] = static_cast<T>(
powf(powf(0.5f, powf(0.5f, log2f(num_heads_pow2 << 1) - 3.f)), (h - num_heads_pow2) * 2 + 1));
}
}
}
}
template<typename T>
void invokeBuildAlibiSlopes(T* alibi_slopes, const size_t num_heads, cudaStream_t stream)
{
// Generate the slopes of the linear attention bias (ALiBi).
//
// Paper: https://arxiv.org/abs/2108.12409
// HF's implementation
// https://github.com/huggingface/transformers/blob/56ef0ba44765162f830873c140bd40bdc975cc34/src/transformers/models/bloom/modeling_bloom.py#L86
// Author's implementation
// https://github.com/ofirpress/attention_with_linear_biases/blob/02aa87e7a29e9340efd28d6d169018eafb3aa57a/fairseq/models/transformer.py#L760
//
// alibi_slopes: [num_heads],
// strictly follows HF's implementation, which treats power-of-2 and non-power-of-2 head counts differently.
// What the paper generates differs from HF's when the number of heads is not a power of 2.
// num_heads: the number of attention heads.
// stream: a cuda stream.
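// For example, with num_heads = 8 this produces slopes 2^-1, 2^-2, ..., 2^-8, one per head.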
dim3 block(min((int)num_heads, 512));
generate_alibi_slopes<<<1, block, 0, stream>>>(alibi_slopes, num_heads);
}
template void invokeBuildAlibiSlopes(float* alibi_slopes, const size_t num_heads, cudaStream_t stream);
template void invokeBuildAlibiSlopes(half* alibi_slopes, const size_t num_heads, cudaStream_t stream);
#ifdef ENABLE_BF16
template void invokeBuildAlibiSlopes(__nv_bfloat16* alibi_slopes, const size_t num_heads, cudaStream_t stream);
#endif
template void invokeGenRelativePosBiasV2(float* relative_position_bias,
const float* relative_coords_table,
const int* relative_position_bias_index,
const float* cpb_mlp_weight1,
const float* cpb_mlp_bias1,
const float* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
cudaStream_t stream);
template void invokeGenRelativePosBiasV2(half* relative_position_bias,
const half* relative_coords_table,
const int* relative_position_bias_index,
const half* cpb_mlp_weight1,
const half* cpb_mlp_bias1,
const half* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
cudaStream_t stream);
template void invokeGenRelativePosBiasV2(float* relative_position_bias,
const float* relative_coords_table,
const int64_t* relative_position_bias_index,
const float* cpb_mlp_weight1,
const float* cpb_mlp_bias1,
const float* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
cudaStream_t stream);
template void invokeGenRelativePosBiasV2(half* relative_position_bias,
const half* relative_coords_table,
const int64_t* relative_position_bias_index,
const half* cpb_mlp_weight1,
const half* cpb_mlp_bias1,
const half* cpb_mlp_weight2,
const int window_size,
const int cpb_mlp_in_dim,
const int cpb_mlp_out_dim,
const int head_num,
cudaStream_t stream);
} // namespace fastertransformer
|
914bcaad8fe618dbfed90c59dfa79cefe25bc45b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_1d_layer_tester_cuda_fermi.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../convolution_layer.h"
texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
template<int BLOCK_SIZE>
__global__ void convolution_1d_tex_blocked_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
int output_width,
int input_width,
int window_width,
int input_feature_map_count,
int output_feature_map_count,
int entry_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int output_feature_map_id = (blockIdx.y * blockDim.y + threadIdx.y) * FEATURE_MAP_BLOCK_SIZE;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (output_feature_map_id < output_feature_map_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = window_width * input_feature_map_count;
int input_elem_id = entry_id * input_feature_map_count * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
}
input_elem_id += input_width - window_width;
}
float * base_output = output + (entry_id * output_feature_map_count + output_feature_map_id) * output_width + x;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_width * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_1d_tex_exact_blocked_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
int output_width,
int input_width,
int input_feature_map_count,
int output_feature_map_count,
int entry_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int output_feature_map_id = (blockIdx.y * blockDim.y + threadIdx.y) * FEATURE_MAP_BLOCK_SIZE;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (output_feature_map_id < output_feature_map_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = WINDOW_WIDTH * input_feature_map_count;
int input_elem_id = entry_id * input_feature_map_count * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
}
input_elem_id += input_width - WINDOW_WIDTH;
}
float * base_output = output + (entry_id * output_feature_map_count + output_feature_map_id) * output_width + x;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_width * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
namespace nnforge
{
namespace cuda
{
convolution_1d_layer_tester_cuda_fermi::convolution_1d_layer_tester_cuda_fermi()
{
input_tex_ref.addressMode[0] = hipAddressModeBorder;
input_tex_ref.normalized = false;
}
convolution_1d_layer_tester_cuda_fermi::~convolution_1d_layer_tester_cuda_fermi()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
#define launch_exact_kernel_const_const(window_width_const, block_size_const) \
hipLaunchKernelGGL(( convolution_1d_tex_exact_blocked_kernel_fermi<window_width_const,block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *additional_buffers[0], *data[0], *data[1], output_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[0], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count);
#define launch_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_kernel_const(block_size_const) \
hipLaunchKernelGGL(( convolution_1d_tex_blocked_kernel_fermi<block_size_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *additional_buffers[0], *data[0], *data[1], output_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[0], window_sizes[0], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count);
#define launch_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1); \
break; \
case 2: \
launch_kernel_const(2); \
break; \
case 3: \
launch_kernel_const(3); \
break; \
case 4: \
launch_kernel_const(4); \
break; \
case 5: \
launch_kernel_const(5); \
break; \
};
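		// The launch_* macros above map a runtime window width and block size onto compile-time
		// template parameters: the WINDOW_WIDTH-specialized kernel can use a plain #pragma unroll
		// on its inner loop because the trip count is known at compile time, while the generic
		// kernel falls back to a partial "#pragma unroll 4".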
void convolution_1d_layer_tester_cuda_fermi::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, input_tex_ref, *input_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float)));
int block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
(output_configuration_specific.dimension_sizes[0] + block_size - 1) / block_size,
((output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE),
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_exact_kernel(window_sizes[0], block_size);
}
else
{
launch_kernel(block_size);
}
}
int convolution_1d_layer_tester_cuda_fermi::get_block_size(int output_width)
{
int block_count = (output_width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (output_width + block_count - 1) / block_count;
return block_size;
}
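		// Example: with MAX_BLOCK_SIZE = 5, an output width of 11 gives block_count = 3 and
		// block_size = 4, so output elements are spread more evenly across threads than always
		// using the maximum block size would.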
void convolution_1d_layer_tester_cuda_fermi::tester_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
}
std::vector<size_t> convolution_1d_layer_tester_cuda_fermi::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> convolution_1d_layer_tester_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
return res;
}
cuda_linear_buffer_device_smart_ptr convolution_1d_layer_tester_cuda_fermi::get_output_buffer(
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
return additional_buffers[0];
}
}
}
| 914bcaad8fe618dbfed90c59dfa79cefe25bc45b.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_1d_layer_tester_cuda_fermi.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../convolution_layer.h"
texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
template<int BLOCK_SIZE>
__global__ void convolution_1d_tex_blocked_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
int output_width,
int input_width,
int window_width,
int input_feature_map_count,
int output_feature_map_count,
int entry_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int output_feature_map_id = (blockIdx.y * blockDim.y + threadIdx.y) * FEATURE_MAP_BLOCK_SIZE;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (output_feature_map_id < output_feature_map_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = window_width * input_feature_map_count;
int input_elem_id = entry_id * input_feature_map_count * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
}
input_elem_id += input_width - window_width;
}
float * base_output = output + (entry_id * output_feature_map_count + output_feature_map_id) * output_width + x;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_width * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE>
__global__ void convolution_1d_tex_exact_blocked_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
int output_width,
int input_width,
int input_feature_map_count,
int output_feature_map_count,
int entry_count)
{
int x = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
int output_feature_map_id = (blockIdx.y * blockDim.y + threadIdx.y) * FEATURE_MAP_BLOCK_SIZE;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (x < output_width) && (output_feature_map_id < output_feature_map_count);
if (in_bounds)
{
int weight_count_per_output_feature_map = WINDOW_WIDTH * input_feature_map_count;
int input_elem_id = entry_id * input_feature_map_count * input_width + x;
const float * current_weights = weights + (int)(weight_count_per_output_feature_map * output_feature_map_id);
float bias_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
bias_list[i] = biases[output_feature_map_id + i];
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = bias_list[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int input_layer_id = 0; input_layer_id < input_feature_map_count; ++input_layer_id)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
}
input_elem_id += input_width - WINDOW_WIDTH;
}
float * base_output = output + (entry_id * output_feature_map_count + output_feature_map_id) * output_width + x;
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[j + output_width * i] = sums[i * BLOCK_SIZE + j];
}
}
}
}
}
namespace nnforge
{
namespace cuda
{
convolution_1d_layer_tester_cuda_fermi::convolution_1d_layer_tester_cuda_fermi()
{
input_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_tex_ref.normalized = false;
}
convolution_1d_layer_tester_cuda_fermi::~convolution_1d_layer_tester_cuda_fermi()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
#define launch_exact_kernel_const_const(window_width_const, block_size_const) \
convolution_1d_tex_exact_blocked_kernel_fermi<window_width_const,block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*additional_buffers[0], *data[0], *data[1], output_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[0], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count);
#define launch_exact_kernel_const(window_width, block_size_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const); \
break; \
};
#define launch_exact_kernel(window_width, block_size) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5); \
break; \
};
#define launch_kernel_const(block_size_const) \
convolution_1d_tex_blocked_kernel_fermi<block_size_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*additional_buffers[0], *data[0], *data[1], output_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[0], window_sizes[0], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, entry_count);
#define launch_kernel(block_size) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1); \
break; \
case 2: \
launch_kernel_const(2); \
break; \
case 3: \
launch_kernel_const(3); \
break; \
case 4: \
launch_kernel_const(4); \
break; \
case 5: \
launch_kernel_const(5); \
break; \
};
void convolution_1d_layer_tester_cuda_fermi::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, input_tex_ref, *input_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float)));
int block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
(output_configuration_specific.dimension_sizes[0] + block_size - 1) / block_size,
((output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE),
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
launch_exact_kernel(window_sizes[0], block_size);
}
else
{
launch_kernel(block_size);
}
}
int convolution_1d_layer_tester_cuda_fermi::get_block_size(int output_width)
{
int block_count = (output_width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (output_width + block_count - 1) / block_count;
return block_size;
}
void convolution_1d_layer_tester_cuda_fermi::tester_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
}
std::vector<size_t> convolution_1d_layer_tester_cuda_fermi::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> convolution_1d_layer_tester_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
return res;
}
cuda_linear_buffer_device_smart_ptr convolution_1d_layer_tester_cuda_fermi::get_output_buffer(
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
return additional_buffers[0];
}
}
}
|
78bea90b88144f19a1567e5b1bc939ee2adc3ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include <vector>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <math.h>
#define pi 3.1415926535
using namespace cv;
using std::cout;
using std::endl;
__global__ void binarykernel(uchar *dinput1,uchar *dinput2, uchar *dinput3, uchar *dinput4, uchar *dinput5,double doutbinary1[][1024],double doutbinary2[][1024],double doutbinary3[][1024],double doutbinary4[][1024],double doutbinary5[][1024] )
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
double p1 = dinput1[1280*labely+labelx];
doutbinary1[labelx][labely]=(p1 > 255*0.3) ? 1 : 0;
double p2 = dinput2[1280*labely+labelx];
doutbinary2[labelx][labely]=(p2> 255*0.3) ? 1 : 0;
double p3 = dinput3[1280*labely+labelx];
doutbinary3[labelx][labely]=(p3 > 255*0.3) ? 1 : 0;
double p4 = dinput4[1280*labely+labelx];
doutbinary4[labelx][labely]=(p4 > 255*0.3) ? 1 : 0;
double p5 = dinput5[1280*labely+labelx];
doutbinary5[labelx][labely]=(p5 > 255*0.3) ? 1 : 0;
}
__global__ void phasewrapkernel(uchar *dph1, uchar *dph2, uchar *dph3, uchar *dph4, double dphasewrap[][1024])
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
double doutputsin = 0;
double doutputcos = 0;
dphasewrap[labelx][labely] = 0;
double p1 = dph1[1280*labely+labelx];
doutputsin += p1*sin(pi/2);
doutputcos += p1*cos(pi/2);
double p2 = dph2[1280*labely+labelx];
doutputsin += p2*sin(2*pi/2);
doutputcos += p2*cos(2*pi/2);
double p3 = dph3[1280*labely+labelx];
doutputsin += p3*sin(3*pi/2);
doutputcos += p3*cos(3*pi/2);
double p4 = dph4[1280*labely+labelx];
doutputsin += p4*sin(4*pi/2);
doutputcos += p4*cos(4*pi/2);
dphasewrap[labelx][labely] = atan2(doutputsin,doutputcos);
}
__global__ void graykernel(double dG1[][1024],double dG2[][1024],double dG3[][1024],double dG4[][1024],double dG5[][1024],double dgraycode[][1024])
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
int gray1 = dG1[labelx][labely];
int gray2 = dG2[labelx][labely];
int gray3 = dG3[labelx][labely];
int gray4 = dG4[labelx][labely];
int gray5 = dG5[labelx][labely];
dgraycode[labelx][labely] = gray1*16+(gray1^gray2)*8+((gray1^gray2)^gray3)*4+(((gray1^gray2)^gray3)^gray4)*2+((((gray1^gray2)^gray3)^gray4)^gray5);
}
__global__ void constructimgkernel(double doutbinary1[][1024],double doutbinary2[][1024],double doutbinary3[][1024],double doutbinary4[][1024],double doutbinary5[][1024] )
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
double objphase = doutbinary1[labelx][labely];
double objgray = doutbinary2[labelx][labely];
double platephase = doutbinary3[labelx][labely];
double plategray = doutbinary4[labelx][labely];
doutbinary5[labelx][labely] = objphase+objgray*2*pi-platephase-plategray*2*pi;
}
__global__ void MedianFilter(double In[][1024],double Out[][1024])
{
double window[9];
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= 1280 || y >= 1024) return;
window[0]=(y==0||x==0)?0:In[x-1][y-1];
window[1]=(y==0)?0:In[x][y-1];
window[2]=(y==0||x==1279)? 0:In[x+1][y-1];
window[3]=(x==0)? 0:In[x-1][y];
window[4]= In[x][y];
window[5]=(x==1279)? 0:In[x+1][y];
window[6]=(y==1023||x==0)? 0:In[x-1][y+1];
window[7]=(y==1023)? 0:In[x][y+1];
window[8]=(y==1023||x==1279)? 0:In[x+1][y+1];
for (unsigned int j=0; j<5; ++j)
{
int min=j;
for (unsigned int l=j+1; l<9; ++l)
if (window[l] < window[min])
min=l;
double temp=window[j];
window[j]=window[min];
window[min]=temp;
}
Out[x][y]=window[4];
}
int main()
{
int i=0,j=0;
Mat *objectgray=new Mat[5];
Mat *plategray=new Mat[5];
Mat *objectphase=new Mat[4];
Mat *platephase=new Mat[4];
dim3 grid( 64, 64 ), threads( 20, 16 );
for(i=0;i<5;i++){
objectgray[i] = imread( format( "Capture%d.bmp",i+1),0);
plategray[i] = imread( format( "grayplate%d.bmp",i+1),0);
}
uchar *objectgray1 = objectgray[0].data;
uchar *objectgray2 = objectgray[1].data;
uchar *objectgray3 = objectgray[2].data;
uchar *objectgray4 = objectgray[3].data;
uchar *objectgray5 = objectgray[4].data;
uchar *plategray1 = plategray[0].data;
uchar *plategray2 = plategray[1].data;
uchar *plategray3 = plategray[2].data;
uchar *plategray4 = plategray[3].data;
uchar *plategray5 = plategray[4].data;
double (*objectbw1)[1024] = new double[1280][1024];
double (*objectbw2)[1024] = new double[1280][1024];
double (*objectbw3)[1024] = new double[1280][1024];
double (*objectbw4)[1024] = new double[1280][1024];
double (*objectbw5)[1024] = new double[1280][1024];
double (*platebw1)[1024] = new double[1280][1024];
double (*platebw2)[1024] = new double[1280][1024];
double (*platebw3)[1024] = new double[1280][1024];
double (*platebw4)[1024] = new double[1280][1024];
double (*platebw5)[1024] = new double[1280][1024];
double (*graycodeobject)[1024] = new double[1280][1024];
double (*graycodeplate)[1024] = new double[1280][1024];
uchar *dinput1,*dinput2,*dinput3,*dinput4,*dinput5;
double (*doutbinary1)[1024],(*doutbinary2)[1024],(*doutbinary3)[1024],(*doutbinary4)[1024],(*doutbinary5)[1024];
double (*dgraycode)[1024];
hipMalloc((void**)&dinput1,1280*1024*sizeof(uchar));
hipMalloc((void**)&dinput2,1280*1024*sizeof(uchar));
hipMalloc((void**)&dinput3,1280*1024*sizeof(uchar));
hipMalloc((void**)&dinput4,1280*1024*sizeof(uchar));
hipMalloc((void**)&dinput5,1280*1024*sizeof(uchar));
hipMalloc((void**)&doutbinary1,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary2,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary3,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary4,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary5,1280*1024*sizeof(double));
hipMalloc((void**)&dgraycode,1280*1024*sizeof(double));
hipMemcpy( dinput1, objectgray1, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput2, objectgray2, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput3, objectgray3, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput4, objectgray4, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput5, objectgray5, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( binarykernel), dim3(grid), dim3(threads), 0, 0, dinput1,dinput2,dinput3,dinput4,dinput5,doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5);
hipLaunchKernelGGL(( graykernel), dim3(grid), dim3(threads), 0, 0, doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5,dgraycode);
hipMemcpy( graycodeobject, dgraycode, 1280*1024*sizeof(double), hipMemcpyDeviceToHost ) ;
hipMemcpy( dinput1, plategray1, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput2, plategray2, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput3, plategray3, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput4, plategray4, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dinput5, plategray5, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( binarykernel), dim3(grid), dim3(threads), 0, 0, dinput1,dinput2,dinput3,dinput4,dinput5,doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5);
hipLaunchKernelGGL(( graykernel), dim3(grid), dim3(threads), 0, 0, doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5,dgraycode);
hipMemcpy( graycodeplate, dgraycode, 1280*1024*sizeof(double), hipMemcpyDeviceToHost );
hipFree(dinput1);
hipFree(dinput2);
hipFree(dinput3);
hipFree(dinput4);
hipFree(dinput5);
hipFree(doutbinary1);
hipFree(doutbinary2);
hipFree(doutbinary3);
hipFree(doutbinary4);
hipFree(doutbinary5);
hipFree(dgraycode);
for(j=0;j<4;j++){
objectphase[j] = imread( format( "Capture0%d.bmp",j+1),0);
platephase[j] = imread( format( "plate%d.bmp",j+1),0);
}
uchar *objphase1 = objectphase[0].data;
uchar *objphase2 = objectphase[1].data;
uchar *objphase3 = objectphase[2].data;
uchar *objphase4 = objectphase[3].data;
uchar *platephase1 = platephase[0].data;
uchar *platephase2 = platephase[1].data;
uchar *platephase3 = platephase[2].data;
uchar *platephase4 = platephase[3].data;
double (*objphasewrap)[1024] = new double[1280][1024];
double (*platephasewrap)[1024] = new double[1280][1024];
uchar *dph1,*dph2,*dph3,*dph4;
double (*dphasewrap)[1024];
hipMalloc((void**)&dph1,1280*1024*sizeof(uchar));
hipMalloc((void**)&dph2,1280*1024*sizeof(uchar));
hipMalloc((void**)&dph3,1280*1024*sizeof(uchar));
hipMalloc((void**)&dph4,1280*1024*sizeof(uchar));
hipMalloc((void**)&dphasewrap,1280*1024*sizeof(double));
hipMemcpy( dph1, objphase1, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dph2, objphase2, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dph3, objphase3, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dph4, objphase4, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( phasewrapkernel), dim3(grid), dim3(threads), 0, 0, dph1,dph2,dph3,dph4,dphasewrap);
hipMemcpy( objphasewrap, dphasewrap, 1280*1024*sizeof(double), hipMemcpyDeviceToHost ) ;
hipMemcpy( dph1, platephase1, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dph2, platephase2, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dph3, platephase3, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipMemcpy( dph4, platephase4, 1280*1024*sizeof(uchar), hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( phasewrapkernel), dim3(grid), dim3(threads), 0, 0, dph1,dph2,dph3,dph4,dphasewrap);
hipMemcpy( platephasewrap, dphasewrap, 1280*1024*sizeof(double), hipMemcpyDeviceToHost ) ;
hipFree(dph1);
hipFree(dph2);
hipFree(dph3);
hipFree(dph4);
hipFree(dphasewrap);
for (i=0;i<1280;i++){
for (j=0;j<1023;j++){
if ((graycodeobject[i][j]==graycodeobject[i][j+1])&&(objphasewrap[i][j+1]-objphasewrap[i][j]>=pi))
graycodeobject[i][j+1]=graycodeobject[i][j+1]-1;
else if ((graycodeobject[i][j]==graycodeobject[i][j+1]+1)&&(objphasewrap[i][j+1]-objphasewrap[i][j]<pi))
graycodeobject[i][j+1]=graycodeobject[i][j+1]+1;
else if (graycodeobject[i][j]==graycodeobject[i][j+1]-1)
graycodeobject[i][j+1]=graycodeobject[i][j];
}
}
for (i=0;i<1280;i++){
for (j=0;j<1023;j++){
if ((graycodeplate[i][j]==graycodeplate[i][j+1])&&(platephasewrap[i][j+1]-platephasewrap[i][j]>=pi))
graycodeplate[i][j+1]=graycodeplate[i][j+1]-1;
else if ((graycodeplate[i][j]==graycodeplate[i][j+1]+1)&&(platephasewrap[i][j+1]-platephasewrap[i][j]<pi))
graycodeplate[i][j+1]=graycodeplate[i][j+1]+1;
else if (graycodeplate[i][j]==graycodeplate[i][j+1]-1)
graycodeplate[i][j+1]=graycodeplate[i][j];
}
}
double (*imgoutput)[1024] = new double[1280][1024];
double (*imgafterfilter)[1024] = new double[1280][1024];
hipMalloc((void**)&doutbinary1,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary2,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary3,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary4,1280*1024*sizeof(double));
hipMalloc((void**)&doutbinary5,1280*1024*sizeof(double));
hipMemcpy( doutbinary1, objphasewrap, 1280*1024*sizeof(double), hipMemcpyHostToDevice ) ;
hipMemcpy( doutbinary2, graycodeobject, 1280*1024*sizeof(double), hipMemcpyHostToDevice ) ;
hipMemcpy( doutbinary3, platephasewrap, 1280*1024*sizeof(double), hipMemcpyHostToDevice ) ;
hipMemcpy( doutbinary4, graycodeplate, 1280*1024*sizeof(double), hipMemcpyHostToDevice ) ;
hipLaunchKernelGGL(( constructimgkernel), dim3(grid), dim3(threads), 0, 0, doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5);
hipMemcpy( imgoutput, doutbinary5, 1280*1024*sizeof(double), hipMemcpyDeviceToHost ) ;
hipFree(doutbinary2);
hipFree(doutbinary3);
hipFree(doutbinary4);
hipLaunchKernelGGL(( MedianFilter), dim3(grid), dim3(threads), 0, 0, doutbinary5,doutbinary1);
hipMemcpy( imgafterfilter, doutbinary1, 1280*1024*sizeof(double), hipMemcpyDeviceToHost ) ;
hipFree(doutbinary1);
hipFree(doutbinary5);
std::ofstream outf("out.txt",std::ios::out);
for (i=300;i<700;i++){
for (j=400;j<900;j++){
double z=imgafterfilter[j][i];
outf<<j-400<<" "<<i-300<<" "<<z<<endl;
}
}
outf.close();
}
| 78bea90b88144f19a1567e5b1bc939ee2adc3ea9.cu | #include <stdio.h>
#include "cuda.h"
#include "cublas.h"
#include <vector>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <math.h>
#define pi 3.1415926535
using namespace cv;
using std::cout;
using std::endl;
__global__ void binarykernel(uchar *dinput1,uchar *dinput2, uchar *dinput3, uchar *dinput4, uchar *dinput5,double doutbinary1[][1024],double doutbinary2[][1024],double doutbinary3[][1024],double doutbinary4[][1024],double doutbinary5[][1024] )
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
double p1 = dinput1[1280*labely+labelx];
doutbinary1[labelx][labely]=(p1 > 255*0.3) ? 1 : 0;
double p2 = dinput2[1280*labely+labelx];
doutbinary2[labelx][labely]=(p2> 255*0.3) ? 1 : 0;
double p3 = dinput3[1280*labely+labelx];
doutbinary3[labelx][labely]=(p3 > 255*0.3) ? 1 : 0;
double p4 = dinput4[1280*labely+labelx];
doutbinary4[labelx][labely]=(p4 > 255*0.3) ? 1 : 0;
double p5 = dinput5[1280*labely+labelx];
doutbinary5[labelx][labely]=(p5 > 255*0.3) ? 1 : 0;
}
__global__ void phasewrapkernel(uchar *dph1, uchar *dph2, uchar *dph3, uchar *dph4, double dphasewrap[][1024])
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
double doutputsin = 0;
double doutputcos = 0;
dphasewrap[labelx][labely] = 0;
double p1 = dph1[1280*labely+labelx];
doutputsin += p1*sin(pi/2);
doutputcos += p1*cos(pi/2);
double p2 = dph2[1280*labely+labelx];
doutputsin += p2*sin(2*pi/2);
doutputcos += p2*cos(2*pi/2);
double p3 = dph3[1280*labely+labelx];
doutputsin += p3*sin(3*pi/2);
doutputcos += p3*cos(3*pi/2);
double p4 = dph4[1280*labely+labelx];
doutputsin += p4*sin(4*pi/2);
doutputcos += p4*cos(4*pi/2);
dphasewrap[labelx][labely] = atan2(doutputsin,doutputcos);
}
__global__ void graykernel(double dG1[][1024],double dG2[][1024],double dG3[][1024],double dG4[][1024],double dG5[][1024],double dgraycode[][1024])
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
int gray1 = dG1[labelx][labely];
int gray2 = dG2[labelx][labely];
int gray3 = dG3[labelx][labely];
int gray4 = dG4[labelx][labely];
int gray5 = dG5[labelx][labely];
dgraycode[labelx][labely] = gray1*16+(gray1^gray2)*8+((gray1^gray2)^gray3)*4+(((gray1^gray2)^gray3)^gray4)*2+((((gray1^gray2)^gray3)^gray4)^gray5);
}
__global__ void constructimgkernel(double doutbinary1[][1024],double doutbinary2[][1024],double doutbinary3[][1024],double doutbinary4[][1024],double doutbinary5[][1024] )
{
int labelx = (blockIdx.x * blockDim.x + threadIdx.x);
int labely = (blockIdx.y * blockDim.y + threadIdx.y);
double objphase = doutbinary1[labelx][labely];
double objgray = doutbinary2[labelx][labely];
double platephase = doutbinary3[labelx][labely];
double plategray = doutbinary4[labelx][labely];
doutbinary5[labelx][labely] = objphase+objgray*2*pi-platephase-plategray*2*pi;
}
__global__ void MedianFilter(double In[][1024],double Out[][1024])
{
double window[9];
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
if (x >= 1280 || y >= 1024) return;
window[0]=(y==0||x==0)?0:In[x-1][y-1];
window[1]=(y==0)?0:In[x][y-1];
window[2]=(y==0||x==1279)? 0:In[x+1][y-1];
window[3]=(x==0)? 0:In[x-1][y];
window[4]= In[x][y];
window[5]=(x==1279)? 0:In[x+1][y];
window[6]=(y==1023||x==0)? 0:In[x-1][y+1];
window[7]=(y==1023)? 0:In[x][y+1];
window[8]=(y==1023||x==1279)? 0:In[x+1][y+1];
for (unsigned int j=0; j<5; ++j)
{
int min=j;
for (unsigned int l=j+1; l<9; ++l)
if (window[l] < window[min])
min=l;
double temp=window[j];
window[j]=window[min];
window[min]=temp;
}
Out[x][y]=window[4];
}
int main()
{
int i=0,j=0;
Mat *objectgray=new Mat[5];
Mat *plategray=new Mat[5];
Mat *objectphase=new Mat[4];
Mat *platephase=new Mat[4];
dim3 grid( 64, 64 ), threads( 20, 16 );
for(i=0;i<5;i++){
objectgray[i] = imread( format( "Capture%d.bmp",i+1),0);
plategray[i] = imread( format( "grayplate%d.bmp",i+1),0);
}
uchar *objectgray1 = objectgray[0].data;
uchar *objectgray2 = objectgray[1].data;
uchar *objectgray3 = objectgray[2].data;
uchar *objectgray4 = objectgray[3].data;
uchar *objectgray5 = objectgray[4].data;
uchar *plategray1 = plategray[0].data;
uchar *plategray2 = plategray[1].data;
uchar *plategray3 = plategray[2].data;
uchar *plategray4 = plategray[3].data;
uchar *plategray5 = plategray[4].data;
double (*objectbw1)[1024] = new double[1280][1024];
double (*objectbw2)[1024] = new double[1280][1024];
double (*objectbw3)[1024] = new double[1280][1024];
double (*objectbw4)[1024] = new double[1280][1024];
double (*objectbw5)[1024] = new double[1280][1024];
double (*platebw1)[1024] = new double[1280][1024];
double (*platebw2)[1024] = new double[1280][1024];
double (*platebw3)[1024] = new double[1280][1024];
double (*platebw4)[1024] = new double[1280][1024];
double (*platebw5)[1024] = new double[1280][1024];
double (*graycodeobject)[1024] = new double[1280][1024];
double (*graycodeplate)[1024] = new double[1280][1024];
uchar *dinput1,*dinput2,*dinput3,*dinput4,*dinput5;
double (*doutbinary1)[1024],(*doutbinary2)[1024],(*doutbinary3)[1024],(*doutbinary4)[1024],(*doutbinary5)[1024];
double (*dgraycode)[1024];
cudaMalloc((void**)&dinput1,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dinput2,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dinput3,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dinput4,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dinput5,1280*1024*sizeof(uchar));
cudaMalloc((void**)&doutbinary1,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary2,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary3,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary4,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary5,1280*1024*sizeof(double));
cudaMalloc((void**)&dgraycode,1280*1024*sizeof(double));
cudaMemcpy( dinput1, objectgray1, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput2, objectgray2, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput3, objectgray3, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput4, objectgray4, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput5, objectgray5, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
binarykernel<<<grid, threads>>>(dinput1,dinput2,dinput3,dinput4,dinput5,doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5);
graykernel<<<grid, threads>>>(doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5,dgraycode);
cudaMemcpy( graycodeobject, dgraycode, 1280*1024*sizeof(double), cudaMemcpyDeviceToHost ) ;
cudaMemcpy( dinput1, plategray1, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput2, plategray2, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput3, plategray3, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput4, plategray4, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dinput5, plategray5, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
binarykernel<<<grid, threads>>>(dinput1,dinput2,dinput3,dinput4,dinput5,doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5);
graykernel<<<grid, threads>>>(doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5,dgraycode);
cudaMemcpy( graycodeplate, dgraycode, 1280*1024*sizeof(double), cudaMemcpyDeviceToHost );
cudaFree(dinput1);
cudaFree(dinput2);
cudaFree(dinput3);
cudaFree(dinput4);
cudaFree(dinput5);
cudaFree(doutbinary1);
cudaFree(doutbinary2);
cudaFree(doutbinary3);
cudaFree(doutbinary4);
cudaFree(doutbinary5);
cudaFree(dgraycode);
for(j=0;j<4;j++){
objectphase[j] = imread( format( "Capture0%d.bmp",j+1),0);
platephase[j] = imread( format( "plate%d.bmp",j+1),0);
}
uchar *objphase1 = objectphase[0].data;
uchar *objphase2 = objectphase[1].data;
uchar *objphase3 = objectphase[2].data;
uchar *objphase4 = objectphase[3].data;
uchar *platephase1 = platephase[0].data;
uchar *platephase2 = platephase[1].data;
uchar *platephase3 = platephase[2].data;
uchar *platephase4 = platephase[3].data;
double (*objphasewrap)[1024] = new double[1280][1024];
double (*platephasewrap)[1024] = new double[1280][1024];
uchar *dph1,*dph2,*dph3,*dph4;
double (*dphasewrap)[1024];
cudaMalloc((void**)&dph1,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dph2,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dph3,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dph4,1280*1024*sizeof(uchar));
cudaMalloc((void**)&dphasewrap,1280*1024*sizeof(double));
cudaMemcpy( dph1, objphase1, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dph2, objphase2, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dph3, objphase3, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dph4, objphase4, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
phasewrapkernel<<<grid, threads>>>(dph1,dph2,dph3,dph4,dphasewrap);
cudaMemcpy( objphasewrap, dphasewrap, 1280*1024*sizeof(double), cudaMemcpyDeviceToHost ) ;
cudaMemcpy( dph1, platephase1, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dph2, platephase2, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dph3, platephase3, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
cudaMemcpy( dph4, platephase4, 1280*1024*sizeof(uchar), cudaMemcpyHostToDevice ) ;
phasewrapkernel<<<grid, threads>>>(dph1,dph2,dph3,dph4,dphasewrap);
cudaMemcpy( platephasewrap, dphasewrap, 1280*1024*sizeof(double), cudaMemcpyDeviceToHost ) ;
cudaFree(dph1);
cudaFree(dph2);
cudaFree(dph3);
cudaFree(dph4);
cudaFree(dphasewrap);
for (i=0;i<1280;i++){
for (j=0;j<1023;j++){
if ((graycodeobject[i][j]==graycodeobject[i][j+1])&&(objphasewrap[i][j+1]-objphasewrap[i][j]>=pi))
graycodeobject[i][j+1]=graycodeobject[i][j+1]-1;
else if ((graycodeobject[i][j]==graycodeobject[i][j+1]+1)&&(objphasewrap[i][j+1]-objphasewrap[i][j]<pi))
graycodeobject[i][j+1]=graycodeobject[i][j+1]+1;
else if (graycodeobject[i][j]==graycodeobject[i][j+1]-1)
graycodeobject[i][j+1]=graycodeobject[i][j];
}
}
for (i=0;i<1280;i++){
for (j=0;j<1023;j++){
if ((graycodeplate[i][j]==graycodeplate[i][j+1])&&(platephasewrap[i][j+1]-platephasewrap[i][j]>=pi))
graycodeplate[i][j+1]=graycodeplate[i][j+1]-1;
else if ((graycodeplate[i][j]==graycodeplate[i][j+1]+1)&&(platephasewrap[i][j+1]-platephasewrap[i][j]<pi))
graycodeplate[i][j+1]=graycodeplate[i][j+1]+1;
else if (graycodeplate[i][j]==graycodeplate[i][j+1]-1)
graycodeplate[i][j+1]=graycodeplate[i][j];
}
}
double (*imgoutput)[1024] = new double[1280][1024];
double (*imgafterfilter)[1024] = new double[1280][1024];
cudaMalloc((void**)&doutbinary1,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary2,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary3,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary4,1280*1024*sizeof(double));
cudaMalloc((void**)&doutbinary5,1280*1024*sizeof(double));
cudaMemcpy( doutbinary1, objphasewrap, 1280*1024*sizeof(double), cudaMemcpyHostToDevice ) ;
cudaMemcpy( doutbinary2, graycodeobject, 1280*1024*sizeof(double), cudaMemcpyHostToDevice ) ;
cudaMemcpy( doutbinary3, platephasewrap, 1280*1024*sizeof(double), cudaMemcpyHostToDevice ) ;
cudaMemcpy( doutbinary4, graycodeplate, 1280*1024*sizeof(double), cudaMemcpyHostToDevice ) ;
constructimgkernel<<<grid, threads>>>(doutbinary1,doutbinary2,doutbinary3,doutbinary4,doutbinary5);
cudaMemcpy( imgoutput, doutbinary5, 1280*1024*sizeof(double), cudaMemcpyDeviceToHost ) ;
cudaFree(doutbinary2);
cudaFree(doutbinary3);
cudaFree(doutbinary4);
MedianFilter<<<grid, threads>>>(doutbinary5,doutbinary1);
cudaMemcpy( imgafterfilter, doutbinary1, 1280*1024*sizeof(double), cudaMemcpyDeviceToHost ) ;
cudaFree(doutbinary1);
cudaFree(doutbinary5);
std::ofstream outf("out.txt",std::ios::out);
for (i=300;i<700;i++){
for (j=400;j<900;j++){
double z=imgafterfilter[j][i];
outf<<j-400<<" "<<i-300<<" "<<z<<endl;
}
}
outf.close();
}
|
6a446e91b13207deb95940e3a3205d90fa02f2b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <IL/il.h>
#include <IL/ilu.h>
#define BLOCK_SIZE 32
#define HYPERm 2
#define HYPERk 4
#define LAMBDA 2
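// HYPERm and HYPERk schedule the push-relabel loop in main(): HYPERm push passes are run per
// local relabel and HYPERk local relabels per outer iteration (the global relabel is currently
// commented out); LAMBDA weights the regional term R_function in the commented-out
// initConstraints terminal-edge setup.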
using namespace std;
__device__ unsigned long long B_function(int x, int y){
// return (x - y) * (x - y);
return abs(x - y);
}
__device__ unsigned long long R_function(int x, int y){
//Object
if(y == 1){
return 1;
}
return 2;
}
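// B_function is the boundary penalty between neighbouring pixel intensities and R_function the
// regional penalty for labelling a pixel as object (y == 1) or background, in the style of a
// Boykov-Jolly graph-cut energy; both are placeholder costs here (absolute intensity difference
// and the constants 1 / 2).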
struct Pixel{
int pixel_value, hard_constraint, height;
unsigned long long neighbor_capacities[10]; //Stored in row major form, followed by source and sink
unsigned long long neighbor_flows[10];
unsigned long long int excess;
bool is_active;
Pixel(){
this -> hard_constraint = 0;
this -> height = 0;
this -> excess = 0;
this -> is_active = false;
}
};
struct Terminal{
unsigned long long int excess;
bool is_active;
int height;
Terminal(){
this -> is_active = false;
this -> height = 0;
this -> excess = 0;
}
};
void saveImage(const char* filename, int width, int height, unsigned char * bitmap){
ILuint imageID = ilGenImage();
ilBindImage(imageID);
ilTexImage(width, height, 0, 1, IL_LUMINANCE, IL_UNSIGNED_BYTE, bitmap);
iluFlipImage();
ilEnable(IL_FILE_OVERWRITE);
ilSave(IL_PNG, filename);
fprintf(stderr, "Image saved as: %s\n", filename);
}
ILuint loadImage(const char *filename, unsigned char ** bitmap, int &width, int &height){
ILuint imageID = ilGenImage();
ilBindImage(imageID);
ILboolean success = ilLoadImage(filename);
if (!success) return 0;
width = ilGetInteger(IL_IMAGE_WIDTH);
height = ilGetInteger(IL_IMAGE_HEIGHT);
printf("Width: %d\t Height: %d\n", width, height);
*bitmap = ilGetData();
return imageID;
}
__global__ void push(Pixel *image_graph, unsigned long long *F, Terminal *source, Terminal *sink, int height, int width, int *convergence_flag){
int i = (threadIdx.x + blockIdx.x * blockDim.x) + 1;
int j = (threadIdx.y + blockDim.y * blockIdx.y) + 1;
if (i <= height && j <= width){
// unsigned long long *neighbor_flows = image_graph[i * width + j].neighbor_flows;
// unsigned long long *neighbor_capacities = image_graph[i * width + j].neighbor_capacities;
unsigned long long excess = image_graph[i * width + j].excess;
// Row major traversal of neighbors of a pixel (i,j)
int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
int thread_flag = 0;
int dest_x, dest_y;
// Check spatial neighbors
for(int l = 0; l < 8; l++){
dest_x = i + x_offsets[l];
dest_y = j + y_offsets[l];
if(image_graph[dest_x * width + dest_y].height + 1 == image_graph[i * width + j].height){
int flow = min(image_graph[i * width + j].neighbor_capacities[l] - image_graph[i * width + j].neighbor_flows[l], excess);
atomicAdd(&(image_graph[i * width + j].excess) , -flow) ;
atomicAdd(&(image_graph[dest_x * width + dest_y].excess), flow) ;
atomicAdd(&(image_graph[i * width + j].neighbor_capacities[l]) , -flow) ;
atomicAdd(&(image_graph[dest_x * width + dest_y].neighbor_capacities[7 - l]), flow) ;
thread_flag = 1;
}
}
unsigned long long flow = min(image_graph[i * width + j].excess, image_graph[i * width + j].neighbor_capacities[9]);
atomicAdd(&image_graph[i * width + j].neighbor_flows[9], flow);
if (image_graph[i * width + j].excess == flow)
atomicAdd(&image_graph[i * width + j].excess, -flow);
atomicAdd(&(sink -> excess), flow);
__syncthreads();
// Update flags
atomicOr(convergence_flag, thread_flag);
// printf("%d ", *convergence_flag);
}
}
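// One push pass: each pixel with excess pushes flow to spatial neighbours that sit one height
// level below it, then drains what it can along its sink edge (neighbor index 9); thread_flag
// records whether any spatial push happened and is OR-ed into convergence_flag so the host can
// tell when the grid has stopped making progress.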
// __global__ void pull(Pixel *image_graph, unsigned long long *F, Terminal *source, Terminal *sink, int height, int width){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// // Should be <=, but fails for that
// if (i < height && j < width){
// unsigned long long aggregate_flow = 0;
// // Row major traversal of neighbors of a pixel (i,j)
// int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
// int dest_x, dest_y;
// // Check spatial neighbors
// for(int k = 0; k < 8; k++){
// dest_x = i + x_offsets[k];
// dest_y = j + y_offsets[k];
// aggregate_flow += F[dest_x * width + dest_y];
// }
// aggregate_flow += source->excess;
// // aggregate_flow += source->excess;
// image_graph[i * width + j].excess += aggregate_flow;
// }
// }
// __global__ void localRelabel(Pixel *image_graph, Terminal *source, Terminal *sink, int height, int width){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// int locali = (i - 1) % BLOCK_SIZE, localj = (j - 1) % BLOCK_SIZE;
// if (i <= height && j <= width){
// __shared__ int shared_heights[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
// // __shared__ bool shared_flags[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
// shared_heights[locali + 1][localj + 1] = image_graph[i * width + j].height;
// // shared_flags[locali + 1][localj + 1] = image_graph[i * width + j].is_active;
// //Boundary pixels of grid
// if(locali == 0){
// shared_heights[0][localj + 1] = image_graph[(i - 1) * width + j].height;
// if(localj == 0){
// shared_heights[0][0] = image_graph[(i - 1) * width + (j - 1)].height;
// }
// else if(localj == BLOCK_SIZE - 1){
// shared_heights[0][BLOCK_SIZE + 1] = image_graph[(i - 1) * width + (j + 1)].height;
// }
// }
// else if(locali == BLOCK_SIZE - 1){
// shared_heights[BLOCK_SIZE + 1][localj + 1] = image_graph[(i + 1) * width + j].height;
// if(localj == 0){
// shared_heights[BLOCK_SIZE + 1][0] = image_graph[(i + 1) * width + (j - 1)].height;
// }
// else if(localj == BLOCK_SIZE - 1){
// shared_heights[BLOCK_SIZE + 1][BLOCK_SIZE + 1] = image_graph[(i + 1) * width + (j + 1)].height;
// }
// }
// else if(localj == 0){
// shared_heights[locali + 1][0] = image_graph[i * width + (j - 1)].height;
// }
// else if(localj == BLOCK_SIZE - 1){
// shared_heights[locali + 1][BLOCK_SIZE + 1] = image_graph[i * width + (j + 1)].height;
// }
// __syncthreads();
// // Row major traversal of neighbors of a pixel (i,j)
// int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
// int dest_x, dest_y;
// int min_height = INT_MAX;
// // Check spatial neighbors
// for(int l = 0; l < 8; l++){
// dest_x = (locali + 1) + x_offsets[l];
// dest_y = (localj + 1) + y_offsets[l];
// // if(image_graph[dest_x * width + dest_y].excess > 0 && image_graph[dest_x * width + dest_y].excess != image_graph[dest_x * width + dest_y].){
// // min_height = min(min_height, shared_heights[dest_x][dest_y]);
// // }
// }
// // if(source->is_active){
// // min_height = min(min_height, source->height);
// // }
// // if(sink->is_active){
// // min_height = min(min_height, sink->height);
// // }
// image_graph[i * width + j].height = min_height + 1;
// }
// }
__global__ void localRelabel(Pixel *image_graph, int height, int width)
{
int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
    int j = threadIdx.y + blockIdx.y * blockDim.y + 1;
if (i <= height && j <= width)
{
// Row major traversal of neighbors of a pixel (i,j)
int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
int dest_x, dest_y, min_height = image_graph[i * width + j].height;
for(int l = 0; l < 8; l++){
dest_x = i + x_offsets[l];
dest_y = j + y_offsets[l];
min_height = min(min_height, image_graph[dest_x * width + dest_y].height);
}
image_graph[i * width + j].height = min(min_height + 1, image_graph[i * width + j].height);
}
}
// __global__ void globalRelabel(Pixel *image_graph, int height, int width, int iteration){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// if (i <= height && j <= width){
// //No divergence
// if(iteration == 1){
// for (int l = 0; l < 8; l++)
// if(image_graph[i * width + j].neighbor_capacities[l] > image_graph[i * width + j].excess){
// image_graph[i * width + j].height = 1;
// }
// }
// else{
// bool satisfied = false;
// int dest_x, dest_y;
// // Row major traversal of neighbors of a pixel (i,j)
// int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
// for(int l = 0; l < 8; l++){
// dest_x = (locali + 1) + x_offsets[l];
// dest_y = (localj + 1) + y_offsets[l];
// if(shared_heights[dest_x][dest_y] == iteration){
// satisfied = true;
// break;
// }
// }
// if(satisfied){
// shared_heights[locali + 1][localj + 1] = iteration + 1;
// image_graph[i * width + j].height = iteration + 1;
// }
// }
// }
// }
__global__ void initNeighbors(Pixel *image_graph, unsigned char* raw_image, int height, int width, unsigned long long int* K)
{
int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
if (i <= height && j <= width){
image_graph[i * width + j].pixel_value = raw_image[(i - 1) * width + j - 1];
// Row major traversal of neighbors of a pixel (i,j)
int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
unsigned long long int max_k = 0;
unsigned long long edge_weight = 0;
int dest_x, dest_y;
for(int k = 0; k < 8; k++){
dest_x = i + x_offsets[k];
dest_y = j + y_offsets[k];
edge_weight = B_function(image_graph[i * width + j].pixel_value, image_graph[dest_x * width + dest_y].pixel_value );
image_graph[i * width + j].neighbor_capacities[k] = edge_weight;
image_graph[i * width + j].neighbor_flows[k] = 0;
max_k += edge_weight;
}
max_k++;
__syncthreads();
atomicMax(K, max_k);
}
}
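// Each pixel's max_k is 1 plus the sum of its eight boundary weights, and K becomes the maximum
// of these over the image via atomicMax; the commented-out initConstraints kernel assigns K to
// selected terminal edges (depending on hard_constraint), so K is chosen to dominate any cut
// through the spatial edges (the usual Boykov-Jolly choice of K).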
//Also accept hard and soft constraints array
// __global__ void initConstraints(Pixel *image_graph, int height, int width, unsigned long long K){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// if (i <= height && j <= width){
// // {p,S} edge
// image_graph[i * width + j].neighbor_capacities[8] = (image_graph[i * width + j].hard_constraint == 0) * K
// + (image_graph[i * width + j].hard_constraint == 1) * LAMBDA * R_function(image_graph[i * width + j].pixel_value, -1);
// // {p,T} edge
// image_graph[i * width + j].neighbor_capacities[9] = (image_graph[i * width + j].hard_constraint == -1) * K
// + (image_graph[i * width + j].hard_constraint == 0) * LAMBDA * R_function(image_graph[i * width + j].pixel_value, 1);
// }
// }
int main(int argc, char* argv[]){
int width, height;
unsigned long long* K = new unsigned long long;
*K = LLONG_MAX;
int* convergence_flag = new int, *convergence_flag_gpu;
*convergence_flag = 0;
unsigned char *image, *cuda_image;
unsigned long long *K_gpu, *F_gpu;
Pixel *image_graph, *cuda_image_graph;
Terminal *source, *sink, *cuda_source, *cuda_sink;
ilInit();
ILuint image_id = loadImage(argv[1], &image, width, height);
int pixel_memsize = (width + 1) * (height + 1) * sizeof(Pixel);
if(image_id == 0) {fprintf(stderr, "Error while reading image... aborting.\n"); exit(0);}
//Pixel graph with padding to avoid divergence in kernels for boundary pixels
image_graph = (Pixel*)malloc(pixel_memsize);
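    // Layout note: the graph is allocated with (width + 1) * (height + 1) entries and the
    // kernels index the interior starting at 1, i.e. pixel (i, j) lives at
    // image_graph[i * width + j] and mirrors raw_image[(i - 1) * width + (j - 1)]; the extra
    // cells serve as padding so the 8-neighbour loops avoid per-pixel boundary branches.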
source = new Terminal;
sink = new Terminal;
hipMalloc((void**)&F_gpu, (width + 1) * (height + 1) * sizeof(unsigned long long));
hipMalloc((void**)&convergence_flag_gpu, sizeof(int));
hipMalloc((void**)&cuda_image_graph, pixel_memsize);
hipMalloc((void**)&cuda_image, width * height * sizeof(unsigned char));
hipMalloc((void**)&K_gpu, sizeof(unsigned long long));
hipMalloc((void**)&cuda_source, sizeof(Terminal));
hipMalloc((void**)&cuda_sink, sizeof(Terminal));
//Set properties of source and sink nodes
hipMemcpy(cuda_image_graph, image_graph, pixel_memsize, hipMemcpyHostToDevice);
hipMemcpy(cuda_image, image, width * height * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(K_gpu, K, sizeof(unsigned long long), hipMemcpyHostToDevice);
hipMemcpy(convergence_flag_gpu, convergence_flag, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_source, source, sizeof(Terminal), hipMemcpyHostToDevice);
hipMemcpy(cuda_sink, sink, sizeof(Terminal), hipMemcpyHostToDevice);
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 numBlocks(height / BLOCK_SIZE + 1, width / BLOCK_SIZE + 1);
// Load weights in graph using kernel call/host loops
hipLaunchKernelGGL(( initNeighbors), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_image_graph, cuda_image, height, width, K_gpu);
assert(hipSuccess == hipGetLastError());
printf("Initialized spatial weight values\n");
// hipMemcpy(image_graph, cuda_image_graph, pixel_memsize, hipMemcpyDeviceToHost);
// for (int i = 0; i < (width + 1) * (height + 1); i++)
// cout << image_graph[i].neighbor_capacities[0] << ' ';
// cout << hipGetErrorString(hipGetLastError()) << endl;
// initConstraints<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, height, width, *K);
// assert(hipSuccess == hipGetLastError());
// printf("Initialized terminal weight values\n");
int iteration = 1;
while((*convergence_flag) || (!(*convergence_flag && iteration == 1))){
for(int i = 0; i < HYPERk; i++){
for(int j = 0; j < HYPERm; j++){
hipLaunchKernelGGL(( push), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_image_graph, F_gpu, cuda_source, cuda_sink ,height, width, convergence_flag_gpu);
assert(hipSuccess == hipGetLastError());
printf("Local push operation %d %d\n", i, j);
// pull<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, F_gpu, cuda_source, cuda_sink, height, width);
// assert(hipSuccess == hipGetLastError());
// printf("Local pull operation\n");
hipMemcpy(convergence_flag, convergence_flag_gpu, sizeof(int), hipMemcpyDeviceToHost);
// printf("%d\n", *convergence_flag);
}
hipLaunchKernelGGL(( localRelabel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_image_graph, height, width);
assert(hipSuccess == hipGetLastError());
printf("Local relabel operation\n");
}
// globalRelabel<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, height, width, iteration);
// assert(hipSuccess == hipGetLastError());
// printf("Global relabel operation\n");
// iteration++;
// printf("Completed iteration %d\n\n", iteration);
// hipMemcpy(sink, cuda_sink, sizeof(Terminal), hipMemcpyDeviceToHost);
// printf("Flow: %llu\n", sink -> excess);
}
printf("Done with algorithm\n");
// Load segmented image from graph using another kernel and display it
return 0;
}
| 6a446e91b13207deb95940e3a3205d90fa02f2b8.cu | #include <bits/stdc++.h>
#include <IL/il.h>
#include <IL/ilu.h>
#define BLOCK_SIZE 32
#define HYPERm 2
#define HYPERk 4
#define LAMBDA 2
using namespace std;
__device__ unsigned long long B_function(int x, int y){
// return (x - y) * (x - y);
return abs(x - y);
}
__device__ unsigned long long R_function(int x, int y){
//Object
if(y == 1){
return 1;
}
return 2;
}
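// Illustrative host-side sketch (not called anywhere): the constant K used for the terminal
// edges is normally 1 + max_p sum_{q in N(p)} B(p,q), which is what initNeighbors
// accumulates per pixel on the device. The helper below recomputes it on the CPU for a
// grayscale image of size h x w; the name reference_K and the boundary handling (skipping
// out-of-range neighbours instead of relying on the padded graph) are illustrative
// assumptions only.
unsigned long long reference_K(const unsigned char* img, int h, int w){
    int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
    int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
    unsigned long long K_ref = 0;
    for(int i = 0; i < h; i++){
        for(int j = 0; j < w; j++){
            unsigned long long s = 1; // the +1 keeps K strictly above any spatial sum
            for(int l = 0; l < 8; l++){
                int ni = i + x_offsets[l], nj = j + y_offsets[l];
                if(ni < 0 || nj < 0 || ni >= h || nj >= w) continue;
                s += (unsigned long long)abs((int)img[i * w + j] - (int)img[ni * w + nj]);
            }
            K_ref = max(K_ref, s);
        }
    }
    return K_ref;
}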
struct Pixel{
int pixel_value, hard_constraint, height;
unsigned long long neighbor_capacities[10]; //Stored in row major form, followed by source and sink
unsigned long long neighbor_flows[10];
unsigned long long int excess;
bool is_active;
Pixel(){
this -> hard_constraint = 0;
this -> height = 0;
this -> excess = 0;
this -> is_active = false;
}
};
struct Terminal{
unsigned long long int excess;
bool is_active;
int height;
Terminal(){
this -> is_active = false;
this -> height = 0;
this -> excess = 0;
}
};
void saveImage(const char* filename, int width, int height, unsigned char * bitmap){
ILuint imageID = ilGenImage();
ilBindImage(imageID);
ilTexImage(width, height, 0, 1, IL_LUMINANCE, IL_UNSIGNED_BYTE, bitmap);
iluFlipImage();
ilEnable(IL_FILE_OVERWRITE);
ilSave(IL_PNG, filename);
fprintf(stderr, "Image saved as: %s\n", filename);
}
ILuint loadImage(const char *filename, unsigned char ** bitmap, int &width, int &height){
ILuint imageID = ilGenImage();
ilBindImage(imageID);
ILboolean success = ilLoadImage(filename);
if (!success) return 0;
width = ilGetInteger(IL_IMAGE_WIDTH);
height = ilGetInteger(IL_IMAGE_HEIGHT);
printf("Width: %d\t Height: %d\n", width, height);
*bitmap = ilGetData();
return imageID;
}
__global__ void push(Pixel *image_graph, unsigned long long *F, Terminal *source, Terminal *sink, int height, int width, int *convergence_flag){
int i = (threadIdx.x + blockIdx.x * blockDim.x) + 1;
int j = (threadIdx.y + blockDim.y * blockIdx.y) + 1;
if (i <= height && j <= width){
// unsigned long long *neighbor_flows = image_graph[i * width + j].neighbor_flows;
// unsigned long long *neighbor_capacities = image_graph[i * width + j].neighbor_capacities;
unsigned long long excess = image_graph[i * width + j].excess;
// Row major traversal of neighbors of a pixel (i,j)
int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
int thread_flag = 0;
int dest_x, dest_y;
// Check spatial neighbors
for(int l = 0; l < 8; l++){
dest_x = i + x_offsets[l];
dest_y = j + y_offsets[l];
if(image_graph[dest_x * width + dest_y].height + 1 == image_graph[i * width + j].height){
int flow = min(image_graph[i * width + j].neighbor_capacities[l] - image_graph[i * width + j].neighbor_flows[l], excess);
atomicAdd(&(image_graph[i * width + j].excess) , -flow) ;
atomicAdd(&(image_graph[dest_x * width + dest_y].excess), flow) ;
atomicAdd(&(image_graph[i * width + j].neighbor_capacities[l]) , -flow) ;
atomicAdd(&(image_graph[dest_x * width + dest_y].neighbor_capacities[7 - l]), flow) ;
thread_flag = 1;
}
}
unsigned long long flow = min(image_graph[i * width + j].excess, image_graph[i * width + j].neighbor_capacities[9]);
atomicAdd(&image_graph[i * width + j].neighbor_flows[9], flow);
if (image_graph[i * width + j].excess == flow)
atomicAdd(&image_graph[i * width + j].excess, -flow);
atomicAdd(&(sink -> excess), flow);
__syncthreads();
// Update flags
atomicOr(convergence_flag, thread_flag);
// printf("%d ", *convergence_flag);
}
}
// __global__ void pull(Pixel *image_graph, unsigned long long *F, Terminal *source, Terminal *sink, int height, int width){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// // Should be <=, but fails for that
// if (i < height && j < width){
// unsigned long long aggregate_flow = 0;
// // Row major traversal of neighbors of a pixel (i,j)
// int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
// int dest_x, dest_y;
// // Check spatial neighbors
// for(int k = 0; k < 8; k++){
// dest_x = i + x_offsets[k];
// dest_y = j + y_offsets[k];
// aggregate_flow += F[dest_x * width + dest_y];
// }
// aggregate_flow += source->excess;
// // aggregate_flow += source->excess;
// image_graph[i * width + j].excess += aggregate_flow;
// }
// }
// __global__ void localRelabel(Pixel *image_graph, Terminal *source, Terminal *sink, int height, int width){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// int locali = (i - 1) % BLOCK_SIZE, localj = (j - 1) % BLOCK_SIZE;
// if (i <= height && j <= width){
// __shared__ int shared_heights[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
// // __shared__ bool shared_flags[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
// shared_heights[locali + 1][localj + 1] = image_graph[i * width + j].height;
// // shared_flags[locali + 1][localj + 1] = image_graph[i * width + j].is_active;
// //Boundary pixels of grid
// if(locali == 0){
// shared_heights[0][localj + 1] = image_graph[(i - 1) * width + j].height;
// if(localj == 0){
// shared_heights[0][0] = image_graph[(i - 1) * width + (j - 1)].height;
// }
// else if(localj == BLOCK_SIZE - 1){
// shared_heights[0][BLOCK_SIZE + 1] = image_graph[(i - 1) * width + (j + 1)].height;
// }
// }
// else if(locali == BLOCK_SIZE - 1){
// shared_heights[BLOCK_SIZE + 1][localj + 1] = image_graph[(i + 1) * width + j].height;
// if(localj == 0){
// shared_heights[BLOCK_SIZE + 1][0] = image_graph[(i + 1) * width + (j - 1)].height;
// }
// else if(localj == BLOCK_SIZE - 1){
// shared_heights[BLOCK_SIZE + 1][BLOCK_SIZE + 1] = image_graph[(i + 1) * width + (j + 1)].height;
// }
// }
// else if(localj == 0){
// shared_heights[locali + 1][0] = image_graph[i * width + (j - 1)].height;
// }
// else if(localj == BLOCK_SIZE - 1){
// shared_heights[locali + 1][BLOCK_SIZE + 1] = image_graph[i * width + (j + 1)].height;
// }
// __syncthreads();
// // Row major traversal of neighbors of a pixel (i,j)
// int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
// int dest_x, dest_y;
// int min_height = INT_MAX;
// // Check spatial neighbors
// for(int l = 0; l < 8; l++){
// dest_x = (locali + 1) + x_offsets[l];
// dest_y = (localj + 1) + y_offsets[l];
// // if(image_graph[dest_x * width + dest_y].excess > 0 && image_graph[dest_x * width + dest_y].excess != image_graph[dest_x * width + dest_y].){
// // min_height = min(min_height, shared_heights[dest_x][dest_y]);
// // }
// }
// // if(source->is_active){
// // min_height = min(min_height, source->height);
// // }
// // if(sink->is_active){
// // min_height = min(min_height, sink->height);
// // }
// image_graph[i * width + j].height = min_height + 1;
// }
// }
__global__ void localRelabel(Pixel *image_graph, int height, int width)
{
int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
    int j = threadIdx.y + blockIdx.y * blockDim.y + 1;
if (i <= height && j <= width)
{
// Row major traversal of neighbors of a pixel (i,j)
int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
int dest_x, dest_y, min_height = image_graph[i * width + j].height;
for(int l = 0; l < 8; l++){
dest_x = i + x_offsets[l];
dest_y = j + y_offsets[l];
min_height = min(min_height, image_graph[dest_x * width + dest_y].height);
}
image_graph[i * width + j].height = min(min_height + 1, image_graph[i * width + j].height);
}
}
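// For reference, the textbook relabel rule sets height(p) = 1 + min{ height(q) : q is a
// neighbour with residual capacity p->q > 0 } for an overflowing p. The simplified kernel
// above minimises over all eight neighbours without the residual-capacity test and, because
// of the outer min(), never raises a height above its current value.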
// __global__ void globalRelabel(Pixel *image_graph, int height, int width, int iteration){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// if (i <= height && j <= width){
// //No divergence
// if(iteration == 1){
// for (int l = 0; l < 8; l++)
// if(image_graph[i * width + j].neighbor_capacities[l] > image_graph[i * width + j].excess){
// image_graph[i * width + j].height = 1;
// }
// }
// else{
// bool satisfied = false;
// int dest_x, dest_y;
// // Row major traversal of neighbors of a pixel (i,j)
// int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
// int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
// for(int l = 0; l < 8; l++){
// dest_x = (locali + 1) + x_offsets[l];
// dest_y = (localj + 1) + y_offsets[l];
// if(shared_heights[dest_x][dest_y] == iteration){
// satisfied = true;
// break;
// }
// }
// if(satisfied){
// shared_heights[locali + 1][localj + 1] = iteration + 1;
// image_graph[i * width + j].height = iteration + 1;
// }
// }
// }
// }
__global__ void initNeighbors(Pixel *image_graph, unsigned char* raw_image, int height, int width, unsigned long long int* K)
{
int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
if (i <= height && j <= width){
image_graph[i * width + j].pixel_value = raw_image[(i - 1) * width + j - 1];
// Row major traversal of neighbors of a pixel (i,j)
int x_offsets[] = {-1, -1, -1, 0, 0, 1, 1, 1};
int y_offsets[] = {-1, 0, 1, -1, 1, -1, 0, 1};
unsigned long long int max_k = 0;
unsigned long long edge_weight = 0;
int dest_x, dest_y;
for(int k = 0; k < 8; k++){
dest_x = i + x_offsets[k];
dest_y = j + y_offsets[k];
edge_weight = B_function(image_graph[i * width + j].pixel_value, image_graph[dest_x * width + dest_y].pixel_value );
image_graph[i * width + j].neighbor_capacities[k] = edge_weight;
image_graph[i * width + j].neighbor_flows[k] = 0;
max_k += edge_weight;
}
max_k++;
__syncthreads();
atomicMax(K, max_k);
}
}
//Also accept hard and soft constraints array
// __global__ void initConstraints(Pixel *image_graph, int height, int width, unsigned long long K){
// int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
// int j = threadIdx.y + blockDim.y * blockIdx.y + 1;
// if (i <= height && j <= height){
// // {p,S} edge
// image_graph[i * width + j].neighbor_capacities[8] = (image_graph[i * width + j].hard_constraint == 0) * K
// + (image_graph[i * width + j].hard_constraint == 1) * LAMBDA * R_function(image_graph[i * width + j].pixel_value, -1);
// // {p,T} edge
// image_graph[i * width + j].neighbor_capacities[9] = (image_graph[i * width + j].hard_constraint == -1) * K
// + (image_graph[i * width + j].hard_constraint == 0) * LAMBDA * R_function(image_graph[i * width + j].pixel_value, 1);
// }
// }
int main(int argc, char* argv[]){
int width, height;
unsigned long long* K = new unsigned long long;
*K = LLONG_MAX;
int* convergence_flag = new int, *convergence_flag_gpu;
*convergence_flag = 0;
unsigned char *image, *cuda_image;
unsigned long long *K_gpu, *F_gpu;
Pixel *image_graph, *cuda_image_graph;
Terminal *source, *sink, *cuda_source, *cuda_sink;
ilInit();
ILuint image_id = loadImage(argv[1], &image, width, height);
int pixel_memsize = (width + 1) * (height + 1) * sizeof(Pixel);
if(image_id == 0) {fprintf(stderr, "Error while reading image... aborting.\n"); exit(0);}
//Pixel graph with padding to avoid divergence in kernels for boundary pixels
image_graph = (Pixel*)malloc(pixel_memsize);
source = new Terminal;
sink = new Terminal;
cudaMalloc((void**)&F_gpu, (width + 1) * (height + 1) * sizeof(unsigned long long));
cudaMalloc((void**)&convergence_flag_gpu, sizeof(int));
cudaMalloc((void**)&cuda_image_graph, pixel_memsize);
cudaMalloc((void**)&cuda_image, width * height * sizeof(unsigned char));
cudaMalloc((void**)&K_gpu, sizeof(unsigned long long));
cudaMalloc((void**)&cuda_source, sizeof(Terminal));
cudaMalloc((void**)&cuda_sink, sizeof(Terminal));
//Set properties of source and sink nodes
cudaMemcpy(cuda_image_graph, image_graph, pixel_memsize, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_image, image, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(K_gpu, K, sizeof(unsigned long long), cudaMemcpyHostToDevice);
cudaMemcpy(convergence_flag_gpu, convergence_flag, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_source, source, sizeof(Terminal), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_sink, sink, sizeof(Terminal), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 numBlocks(height / BLOCK_SIZE + 1, width / BLOCK_SIZE + 1);
// Load weights in graph using kernel call/host loops
initNeighbors<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, cuda_image, height, width, K_gpu);
assert(cudaSuccess == cudaGetLastError());
printf("Initialized spatial weight values\n");
// cudaMemcpy(image_graph, cuda_image_graph, pixel_memsize, cudaMemcpyDeviceToHost);
// for (int i = 0; i < (width + 1) * (height + 1); i++)
// cout << image_graph[i].neighbor_capacities[0] << ' ';
// cout << cudaGetErrorString(cudaGetLastError()) << endl;
// initConstraints<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, height, width, *K);
// assert(cudaSuccess == cudaGetLastError());
// printf("Initialized terminal weight values\n");
int iteration = 1;
while((*convergence_flag) || (!(*convergence_flag && iteration == 1))){
for(int i = 0; i < HYPERk; i++){
for(int j = 0; j < HYPERm; j++){
push<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, F_gpu, cuda_source, cuda_sink ,height, width, convergence_flag_gpu);
assert(cudaSuccess == cudaGetLastError());
printf("Local push operation %d %d\n", i, j);
// pull<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, F_gpu, cuda_source, cuda_sink, height, width);
// assert(cudaSuccess == cudaGetLastError());
// printf("Local pull operation\n");
cudaMemcpy(convergence_flag, convergence_flag_gpu, sizeof(int), cudaMemcpyDeviceToHost);
// printf("%d\n", *convergence_flag);
}
localRelabel<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, height, width);
assert(cudaSuccess == cudaGetLastError());
printf("Local relabel operation\n");
}
// globalRelabel<<<numBlocks, threadsPerBlock>>>(cuda_image_graph, height, width, iteration);
// assert(cudaSuccess == cudaGetLastError());
// printf("Global relabel operation\n");
// iteration++;
// printf("Completed iteration %d\n\n", iteration);
// cudaMemcpy(sink, cuda_sink, sizeof(Terminal), cudaMemcpyDeviceToHost);
// printf("Flow: %llu\n", sink -> excess);
}
printf("Done with algorithm\n");
// Load segmented image from graph using another kernel and display it
return 0;
}
|
7228197bb5964b155aa99f2e196a6b5291ce98b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simulate growing mesenchyme enveloped by epithelium
#include <hiprand/hiprand_kernel.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <time.h>
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/polarity.cuh"
#include "../include/property.cuh"
#include "../include/solvers.cuh"
#include "../include/utils.cuh"
#include "../include/vtk.cuh"
const auto r_max = 1;
const auto mean_dist = 0.75;
const auto prolif_rate = 0.006;
const auto n_0 = 200;
const auto n_max = 5000;
const auto n_time_steps = 500;
const auto dt = 0.2;
enum Cell_types { mesenchyme, epithelium };
__device__ Cell_types* d_type;
__device__ int* d_mes_nbs; // number of mesenchymal neighbours
__device__ int* d_epi_nbs;
__device__ Po_cell relu_w_epithelium(
Po_cell Xi, Po_cell r, float dist, int i, int j)
{
Po_cell dF{0};
if (i == j) return dF;
if (dist > r_max) return dF;
float F;
if (d_type[i] == d_type[j]) {
F = fmaxf(0.7 - dist, 0) * 2 - fmaxf(dist - 0.8, 0);
} else {
F = fmaxf(0.8 - dist, 0) * 2 - fmaxf(dist - 0.9, 0);
}
dF.x = r.x * F / dist;
dF.y = r.y * F / dist;
dF.z = r.z * F / dist;
if (d_type[j] == mesenchyme)
d_mes_nbs[i] += 1;
else
d_epi_nbs[i] += 1;
if (d_type[i] == mesenchyme or d_type[j] == mesenchyme) return dF;
dF += bending_force(Xi, r, dist) * 0.15;
return dF;
}
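// Shape of the pairwise interaction above: for dist < r_max the scalar force is
// F = 2*max(r0 - dist, 0) - max(dist - r1, 0), with (r0, r1) = (0.7, 0.8) for like-typed
// pairs and (0.8, 0.9) for mixed pairs, i.e. a strong short-range term below r0 and an
// opposite-signed long-range term beyond r1. Epithelium-epithelium pairs additionally get
// 0.15 * bending_force(...) to keep their polarities aligned.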
__global__ void proliferate(float rate, int n_cells, hiprandState_t* d_state,
Po_cell* d_X, float3* d_old_v, int* d_n_cells)
{
D_ASSERT(n_cells * rate <= n_max);
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_cells) return; // Dividing new cells is problematic!
switch (d_type[i]) {
case mesenchyme: {
auto rnd = hiprand_uniform(&d_state[i]);
if (rnd > rate) return;
break;
}
case epithelium: {
if (d_epi_nbs[i] > d_mes_nbs[i]) return;
}
}
auto n = atomicAdd(d_n_cells, 1);
auto theta = acosf(2. * hiprand_uniform(&d_state[i]) - 1);
auto phi = hiprand_uniform(&d_state[i]) * 2 * M_PI;
d_X[n].x = d_X[i].x + mean_dist / 4 * sinf(theta) * cosf(phi);
d_X[n].y = d_X[i].y + mean_dist / 4 * sinf(theta) * sinf(phi);
d_X[n].z = d_X[i].z + mean_dist / 4 * cosf(theta);
d_X[n].theta = d_X[i].theta;
d_X[n].phi = d_X[i].phi;
d_type[n] = d_type[i];
d_mes_nbs[n] = 0;
d_epi_nbs[n] = 0;
d_old_v[n] = d_old_v[i];
}
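// Placement of the daughter cell above: theta = acos(2u - 1) and phi = 2*pi*u' with u, u'
// uniform in (0, 1] give a direction uniformly distributed on the sphere, so the new cell
// sits at distance mean_dist / 4 from its parent in a uniformly random direction.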
int main(int argc, const char* argv[])
{
// Prepare initial state
Solution<Po_cell, Grid_solver> cells{n_max};
*cells.h_n = n_0;
relaxed_sphere(mean_dist, cells);
Property<Cell_types> type{n_max};
for (auto i = 0; i < n_0; i++) type.h_prop[i] = mesenchyme;
hipMemcpyToSymbol(d_type, &type.d_prop, sizeof(d_type));
type.copy_to_device();
Property<int> n_mes_nbs{n_max};
hipMemcpyToSymbol(d_mes_nbs, &n_mes_nbs.d_prop, sizeof(d_mes_nbs));
Property<int> n_epi_nbs{n_max};
hipMemcpyToSymbol(d_epi_nbs, &n_epi_nbs.d_prop, sizeof(d_epi_nbs));
auto reset_nbs = [&](int n, const Po_cell* __restrict__ d_X, Po_cell* d_dX) {
thrust::fill(thrust::device, n_mes_nbs.d_prop,
n_mes_nbs.d_prop + cells.get_d_n(), 0);
thrust::fill(thrust::device, n_epi_nbs.d_prop,
n_epi_nbs.d_prop + cells.get_d_n(), 0);
};
hiprandState_t* d_state;
hipMalloc(&d_state, n_max * sizeof(hiprandState_t));
auto seed = time(NULL);
hipLaunchKernelGGL(( setup_rand_states), dim3((n_max + 128 - 1) / 128), dim3(128), 0, 0, n_max, seed, d_state);
// Find epithelium
thrust::fill(thrust::device, n_mes_nbs.d_prop, n_mes_nbs.d_prop + n_0, 0);
cells.take_step<relu_w_epithelium>(dt);
cells.copy_to_host();
n_mes_nbs.copy_to_host();
for (auto i = 0; i < n_0; i++) {
if (n_mes_nbs.h_prop[i] < 12 * 2) { // *2 for 2nd order solver
type.h_prop[i] = epithelium;
auto dist = sqrtf(cells.h_X[i].x * cells.h_X[i].x +
cells.h_X[i].y * cells.h_X[i].y +
cells.h_X[i].z * cells.h_X[i].z);
cells.h_X[i].theta = acosf(cells.h_X[i].z / dist);
cells.h_X[i].phi = atan2(cells.h_X[i].y, cells.h_X[i].x);
} else {
cells.h_X[i].theta = 0;
cells.h_X[i].phi = 0;
}
}
cells.copy_to_device();
type.copy_to_device();
// Simulate growth
Vtk_output output{"passive_growth"};
for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
cells.copy_to_host();
type.copy_to_host();
cells.take_step<relu_w_epithelium>(dt, reset_nbs);
hipLaunchKernelGGL(( proliferate), dim3((cells.get_d_n() + 128 - 1) / 128), dim3(128), 0, 0,
prolif_rate * (time_step > 100), cells.get_d_n(), d_state,
cells.d_X, cells.d_old_v, cells.d_n);
output.write_positions(cells);
output.write_property(type);
output.write_polarity(cells);
}
return 0;
}
7228197bb5964b155aa99f2e196a6b5291ce98b8.cu | // Simulate growing mesenchyme enveloped by epithelium
#include <curand_kernel.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <time.h>
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/polarity.cuh"
#include "../include/property.cuh"
#include "../include/solvers.cuh"
#include "../include/utils.cuh"
#include "../include/vtk.cuh"
const auto r_max = 1;
const auto mean_dist = 0.75;
const auto prolif_rate = 0.006;
const auto n_0 = 200;
const auto n_max = 5000;
const auto n_time_steps = 500;
const auto dt = 0.2;
enum Cell_types { mesenchyme, epithelium };
__device__ Cell_types* d_type;
__device__ int* d_mes_nbs; // number of mesenchymal neighbours
__device__ int* d_epi_nbs;
__device__ Po_cell relu_w_epithelium(
Po_cell Xi, Po_cell r, float dist, int i, int j)
{
Po_cell dF{0};
if (i == j) return dF;
if (dist > r_max) return dF;
float F;
if (d_type[i] == d_type[j]) {
F = fmaxf(0.7 - dist, 0) * 2 - fmaxf(dist - 0.8, 0);
} else {
F = fmaxf(0.8 - dist, 0) * 2 - fmaxf(dist - 0.9, 0);
}
dF.x = r.x * F / dist;
dF.y = r.y * F / dist;
dF.z = r.z * F / dist;
if (d_type[j] == mesenchyme)
d_mes_nbs[i] += 1;
else
d_epi_nbs[i] += 1;
if (d_type[i] == mesenchyme or d_type[j] == mesenchyme) return dF;
dF += bending_force(Xi, r, dist) * 0.15;
return dF;
}
__global__ void proliferate(float rate, int n_cells, curandState* d_state,
Po_cell* d_X, float3* d_old_v, int* d_n_cells)
{
D_ASSERT(n_cells * rate <= n_max);
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_cells) return; // Dividing new cells is problematic!
switch (d_type[i]) {
case mesenchyme: {
auto rnd = curand_uniform(&d_state[i]);
if (rnd > rate) return;
break;
}
case epithelium: {
if (d_epi_nbs[i] > d_mes_nbs[i]) return;
}
}
auto n = atomicAdd(d_n_cells, 1);
auto theta = acosf(2. * curand_uniform(&d_state[i]) - 1);
auto phi = curand_uniform(&d_state[i]) * 2 * M_PI;
d_X[n].x = d_X[i].x + mean_dist / 4 * sinf(theta) * cosf(phi);
d_X[n].y = d_X[i].y + mean_dist / 4 * sinf(theta) * sinf(phi);
d_X[n].z = d_X[i].z + mean_dist / 4 * cosf(theta);
d_X[n].theta = d_X[i].theta;
d_X[n].phi = d_X[i].phi;
d_type[n] = d_type[i];
d_mes_nbs[n] = 0;
d_epi_nbs[n] = 0;
d_old_v[n] = d_old_v[i];
}
int main(int argc, const char* argv[])
{
// Prepare initial state
Solution<Po_cell, Grid_solver> cells{n_max};
*cells.h_n = n_0;
relaxed_sphere(mean_dist, cells);
Property<Cell_types> type{n_max};
for (auto i = 0; i < n_0; i++) type.h_prop[i] = mesenchyme;
cudaMemcpyToSymbol(d_type, &type.d_prop, sizeof(d_type));
type.copy_to_device();
Property<int> n_mes_nbs{n_max};
cudaMemcpyToSymbol(d_mes_nbs, &n_mes_nbs.d_prop, sizeof(d_mes_nbs));
Property<int> n_epi_nbs{n_max};
cudaMemcpyToSymbol(d_epi_nbs, &n_epi_nbs.d_prop, sizeof(d_epi_nbs));
auto reset_nbs = [&](int n, const Po_cell* __restrict__ d_X, Po_cell* d_dX) {
thrust::fill(thrust::device, n_mes_nbs.d_prop,
n_mes_nbs.d_prop + cells.get_d_n(), 0);
thrust::fill(thrust::device, n_epi_nbs.d_prop,
n_epi_nbs.d_prop + cells.get_d_n(), 0);
};
curandState* d_state;
cudaMalloc(&d_state, n_max * sizeof(curandState));
auto seed = time(NULL);
setup_rand_states<<<(n_max + 128 - 1) / 128, 128>>>(n_max, seed, d_state);
// Find epithelium
thrust::fill(thrust::device, n_mes_nbs.d_prop, n_mes_nbs.d_prop + n_0, 0);
cells.take_step<relu_w_epithelium>(dt);
cells.copy_to_host();
n_mes_nbs.copy_to_host();
for (auto i = 0; i < n_0; i++) {
if (n_mes_nbs.h_prop[i] < 12 * 2) { // *2 for 2nd order solver
type.h_prop[i] = epithelium;
auto dist = sqrtf(cells.h_X[i].x * cells.h_X[i].x +
cells.h_X[i].y * cells.h_X[i].y +
cells.h_X[i].z * cells.h_X[i].z);
cells.h_X[i].theta = acosf(cells.h_X[i].z / dist);
cells.h_X[i].phi = atan2(cells.h_X[i].y, cells.h_X[i].x);
} else {
cells.h_X[i].theta = 0;
cells.h_X[i].phi = 0;
}
}
cells.copy_to_device();
type.copy_to_device();
// Simulate growth
Vtk_output output{"passive_growth"};
for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
cells.copy_to_host();
type.copy_to_host();
cells.take_step<relu_w_epithelium>(dt, reset_nbs);
proliferate<<<(cells.get_d_n() + 128 - 1) / 128, 128>>>(
prolif_rate * (time_step > 100), cells.get_d_n(), d_state,
cells.d_X, cells.d_old_v, cells.d_n);
output.write_positions(cells);
output.write_property(type);
output.write_polarity(cells);
}
return 0;
}
|
7e1bf91de6387f21403de6d4cb7e4c9c130638ab.hip | // !!! This is a file automatically generated by hipify!!!
/*
To compile:
nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for CUDA header file here:
#include "hip/hip_runtime.h"
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
__device__ int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// Q2c: transform this function into a CUDA kernel
__global__ void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  // one thread per pixel: m indexes the real axis, n the imaginary axis
  int m = threadIdx.x + blockIdx.x*blockDim.x;
  int n = threadIdx.y + blockIdx.y*blockDim.y;
  if(m >= Nre || n >= Nim) return;
  complex_t c;
  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);
  c.r = cmin.r + dr*m;
  c.i = cmin.i + di*n;
  count[m+n*Nre] = testpoint(c);
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of threads per block and the number of blocks here:
int Bx = Nthreads;
int By = Nthreads;
dim3 B(Bx,By,1);
int Gx = (Nre+Bx -1)/Bx;
int Gy = (Nim+By -1)/By;
dim3 G(Gx,Gy,1);
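  // G covers the image with ceil(Nre/Bx) x ceil(Nim/By) blocks, so every pixel gets exactly
  // one thread; the bounds check in the kernel discards the overhang threads of the last
  // block row/column.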
// storage for the iteration counts
  float *countD, *countH;
  hipMalloc((void**)&countD, Nre*Nim*sizeof(float));   // device buffer written by the kernel
  countH = (float*) malloc(Nre*Nim*sizeof(float));     // host copy for the PNG writer
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
clock_t start = clock(); //start time in CPU cycles
// compute mandelbrot set
  hipLaunchKernelGGL(( mandelbrot) , dim3(G),dim3(B), 0, 0, Nre, Nim, cmin, cmax, countD);
  hipDeviceSynchronize(); // the launch is asynchronous; wait for the kernel before timing
  clock_t end = clock(); //end time in CPU cycles
// print elapsed time
printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
printf("Printing mandelbrot.png...");
write_hot_png(fp, Nre, Nim, count, 0, 80);
printf("done.\n");
free(count);
exit(0);
return 0;
}
| 7e1bf91de6387f21403de6d4cb7e4c9c130638ab.cu | /*
To compile:
nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for CUDA header file here:
#include "cuda.h"
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// return iterations before z leaves mandelbrot set for given c
__device__ int testpoint(complex_t c){
int iter;
complex_t z;
double temp;
z = c;
for(iter=0; iter<MXITER; iter++){
temp = (z.r*z.r) - (z.i*z.i) + c.r;
z.i = z.r*z.i*2. + c.i;
z.r = temp;
if((z.r*z.r+z.i*z.i)>4.0){
return iter;
}
}
return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// Q2c: transform this function into a CUDA kernel
__global__ void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  // one thread per pixel: m indexes the real axis, n the imaginary axis
  int m = threadIdx.x + blockIdx.x*blockDim.x;
  int n = threadIdx.y + blockIdx.y*blockDim.y;
  if(m >= Nre || n >= Nim) return;
  complex_t c;
  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);
  c.r = cmin.r + dr*m;
  c.i = cmin.i + di*n;
  count[m+n*Nre] = testpoint(c);
}
int main(int argc, char **argv){
// to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
// usage: ./mandelbrot 4096 4096 1
int Nre = atoi(argv[1]);
int Nim = atoi(argv[2]);
int Nthreads = atoi(argv[3]);
// Q2b: set the number of threads per block and the number of blocks here:
int Bx = Nthreads;
int By = Nthreads;
dim3 B(Bx,By,1);
int Gx = (Nre+Bx -1)/Bx;
int Gy = (Nim+By -1)/By;
dim3 G(Gx,Gy,1);
// storage for the iteration counts
  float *countD, *countH;
  cudaMalloc((void**)&countD, Nre*Nim*sizeof(float));  // device buffer written by the kernel
  countH = (float*) malloc(Nre*Nim*sizeof(float));     // host copy for the PNG writer
// Parameters for a bounding box for "c" that generates an interesting image
const float centRe = -.759856, centIm= .125547;
const float diam = 0.151579;
complex_t cmin;
complex_t cmax;
cmin.r = centRe - 0.5*diam;
cmax.r = centRe + 0.5*diam;
cmin.i = centIm - 0.5*diam;
cmax.i = centIm + 0.5*diam;
clock_t start = clock(); //start time in CPU cycles
// compute mandelbrot set
  mandelbrot <<<G,B>>> (Nre, Nim, cmin, cmax, countD);
  cudaDeviceSynchronize(); // the launch is asynchronous; wait for the kernel before timing
  clock_t end = clock(); //end time in CPU cycles
// print elapsed time
printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
// output mandelbrot to png format image
FILE *fp = fopen("mandelbrot.png", "w");
printf("Printing mandelbrot.png...");
write_hot_png(fp, Nre, Nim, count, 0, 80);
printf("done.\n");
free(count);
exit(0);
return 0;
}
|
6041405a9207efae8caf8630af8563b7c383bc55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// u = grad(f)
// u is a vector field and f a scalar field.
__global__ void
grad_kernel_noshared(const real* __restrict__ f, real * __restrict__ u, const real xfactor,
const real yfactor, const real zfactor)
{
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Compute the gradient
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
f[vfidx(xi - 3, yi, zi)],
f[vfidx(xi - 2, yi, zi)],
f[vfidx(xi - 1, yi, zi)],
f[vfidx(xi + 1, yi, zi)],
f[vfidx(xi + 2, yi, zi)],
f[vfidx(xi + 3, yi, zi)]);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
f[vfidx(xi, yi - 3, zi)],
f[vfidx(xi, yi - 2, zi)],
f[vfidx(xi, yi - 1, zi)],
f[vfidx(xi, yi + 1, zi)],
f[vfidx(xi, yi + 2, zi)],
f[vfidx(xi, yi + 3, zi)]);
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1,
forward1, forward2, forward3);
}
}
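// Reference sketch (assumption): fd1D is provided by the project headers; with the 1/dx,
// 1/dy, 1/dz factors applied by the caller, a matching 6th-order central first derivative
// would be (-m3 + 9*m2 - 45*m1 + 45*p1 - 9*p2 + p3) / 60. The unused helper below spells
// that stencil out for reference only; the real coefficients live in the headers.
static __device__ real fd1D_reference(real m3, real m2, real m1, real p1, real p2, real p3)
{
	return (-m3 + 9*m2 - 45*m1 + 45*p1 - 9*p2 + p3) / 60;
}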
void
grad_noshared(vf3dgpu &f, vf3dgpu &u)
{
hipLaunchKernelGGL(( grad_kernel_noshared), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0, f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
| 6041405a9207efae8caf8630af8563b7c383bc55.cu | // u = grad(f)
// u is a vector field and f a scalar field.
__global__ void
grad_kernel_noshared(const real* __restrict__ f, real * __restrict__ u, const real xfactor,
const real yfactor, const real zfactor)
{
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Compute the gradient
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
f[vfidx(xi - 3, yi, zi)],
f[vfidx(xi - 2, yi, zi)],
f[vfidx(xi - 1, yi, zi)],
f[vfidx(xi + 1, yi, zi)],
f[vfidx(xi + 2, yi, zi)],
f[vfidx(xi + 3, yi, zi)]);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
f[vfidx(xi, yi - 3, zi)],
f[vfidx(xi, yi - 2, zi)],
f[vfidx(xi, yi - 1, zi)],
f[vfidx(xi, yi + 1, zi)],
f[vfidx(xi, yi + 2, zi)],
f[vfidx(xi, yi + 3, zi)]);
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1,
forward1, forward2, forward3);
}
}
void
grad_noshared(vf3dgpu &f, vf3dgpu &u)
{
grad_kernel_noshared<<<xy_tile.nblocks, xy_tile.nthreads>>>(f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
|
9b0716a5e1a884901c5251c9f390e281891789bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _CRT_SECURE_NO_WARNINGS
#include <stdlib.h>
#include <stdio.h>
#include "sentinel-fileutilsmsg.h"
#define isdecimal(ch) ((ch) >= '0' && (ch) <= '9')
struct passwd { short pw_uid; };
__device__ struct passwd *getpwnam(char *name) { return nullptr; }
__device__ __managed__ struct passwd *m_getpwnam_rc;
__global__ void g_getpwnam(char *name)
{
m_getpwnam_rc = getpwnam(name);
}
struct passwd *getpwnam_(char *str)
{
int strLength = strlen(str) + 1;
char *d_str;
hipMalloc(&d_str, strLength);
hipMemcpy(d_str, str, strLength, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( g_getpwnam), dim3(1),dim3(1), 0, 0, d_str);
hipFree(d_str);
return m_getpwnam_rc;
}
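// Pattern note: getpwnam_ copies the user name into device memory, launches a one-thread
// kernel that stores its result in the __managed__ variable m_getpwnam_rc, and returns that
// value on the host. Because the device-side getpwnam above is a stub returning nullptr,
// the name-lookup branch in main() always ends in "Unknown user name".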
__forceinline int dchown_(char *str, int uid) { fileutils_dchown msg(str, uid); return msg.RC; }
int main(int argc, char **argv)
{
atexit(sentinelClientShutdown);
sentinelClientInitialize();
char *cp = argv[1];
int uid;
if (isdecimal(*cp)) {
uid = 0;
while (isdecimal(*cp))
uid = uid * 10 + (*cp++ - '0');
if (*cp) {
fprintf(stderr, "Bad uid value\n");
exit(1);
}
}
else {
struct passwd *pwd = getpwnam_(cp);
if (!pwd) {
fprintf(stderr, "Unknown user name\n");
exit(1);
}
uid = pwd->pw_uid;
}
//
argc--;
argv++;
while (argc-- > 1) {
argv++;
if (dchown_(*argv, uid))
perror(*argv);
}
exit(0);
}
| 9b0716a5e1a884901c5251c9f390e281891789bd.cu | #define _CRT_SECURE_NO_WARNINGS
#include <stdlib.h>
#include <stdio.h>
#include "sentinel-fileutilsmsg.h"
#define isdecimal(ch) ((ch) >= '0' && (ch) <= '9')
struct passwd { short pw_uid; };
__device__ struct passwd *getpwnam(char *name) { return nullptr; }
__device__ __managed__ struct passwd *m_getpwnam_rc;
__global__ void g_getpwnam(char *name)
{
m_getpwnam_rc = getpwnam(name);
}
struct passwd *getpwnam_(char *str)
{
int strLength = strlen(str) + 1;
char *d_str;
cudaMalloc(&d_str, strLength);
cudaMemcpy(d_str, str, strLength, cudaMemcpyHostToDevice);
g_getpwnam<<<1,1>>>(d_str);
cudaFree(d_str);
return m_getpwnam_rc;
}
__forceinline int dchown_(char *str, int uid) { fileutils_dchown msg(str, uid); return msg.RC; }
int main(int argc, char **argv)
{
atexit(sentinelClientShutdown);
sentinelClientInitialize();
char *cp = argv[1];
int uid;
if (isdecimal(*cp)) {
uid = 0;
while (isdecimal(*cp))
uid = uid * 10 + (*cp++ - '0');
if (*cp) {
fprintf(stderr, "Bad uid value\n");
exit(1);
}
}
else {
struct passwd *pwd = getpwnam_(cp);
if (!pwd) {
fprintf(stderr, "Unknown user name\n");
exit(1);
}
uid = pwd->pw_uid;
}
//
argc--;
argv++;
while (argc-- > 1) {
argv++;
if (dchown_(*argv, uid))
perror(*argv);
}
exit(0);
}
|
3b36c16a0a042f58e34009afe64f37e903ae5caf.hip | // !!! This is a file automatically generated by hipify!!!
/**************************************************************************\
|
| Copyright (C) 2009 Marc Stevens
|
| This program is free software: you can redistribute it and/or modify
| it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
| (at your option) any later version.
|
| This program is distributed in the hope that it will be useful,
| but WITHOUT ANY WARRANTY; without even the implied warranty of
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
|
| You should have received a copy of the GNU General Public License
| along with this program. If not, see <http://www.gnu.org/licenses/>.
|
\**************************************************************************/
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdexcept>
#include <boost/cstdint.hpp>
using namespace std;
typedef boost::uint32_t uint32;
typedef boost::uint64_t uint64;
#define MAX_CUDA_THREADS (1<<20)
#define REGISTERS_PER_CUDA_THREAD 32
#define TRAIL_NOCONSTRUCTOR
#include "birthday_types.hpp"
#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(s) { auto ce = s; if (ce != hipSuccess) { throw std::runtime_error("CUDA API Error:\n" + std::string(hipGetErrorName(ce)) + ":\n" + std::string(hipGetErrorString(ce))); } }
#endif
#ifndef cutilSafeCall
#define cutilSafeCall(s) (s)
#endif
class cuda_device_detail {
public:
uint32 device;
uint32 blocks;
uint32 threadsperblock;
trail_type* buffer_host;
};
/* We assume that these are _thread specific_ (instead of global) storage managed by the cuda realtime libraries */
__device__ trail_type working_states2[MAX_CUDA_THREADS];
__device__ trail_type buffer2[MAX_CUDA_THREADS];
__constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4];
__constant__ uint32 precomp1[4], precomp2[4];
__constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength;
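/* Worked example of the trail bookkeeping: each walk is stored in a trail_type as a start
   point, an end point and a length. A step ends the trail when (x & distinguishedpointmask)
   == 0, so with e.g. distinguishedpointmask = 0xFFF roughly one point in 2^12 is
   "distinguished" and trails average about 4096 steps; maximumpathlength restarts walks
   that run too long without hitting a distinguished point. (The mask value is only an
   example; the real one is configured by the host in cuda_device::init.) */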
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define MD5_H(x, y, z) ((x) ^ (y) ^ (z))
#define MD5_I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define MD5_FF(a, b, c, d, x, s, ac) \
{(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_GG(a, b, c, d, x, s, ac) \
{(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_HH(a, b, c, d, x, s, ac) \
{(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_II(a, b, c, d, x, s, ac) \
{(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__global__ void cuda_md5_init()
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
working_states2[idx].len = 0;
buffer2[idx].len = 0;
}
bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen)
{
detail = new cuda_device_detail;
detail->device = device;
int deviceCount;
CUDA_SAFE_CALL( hipGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
cout << "There is no device supporting CUDA!" << endl;
return false;
}
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL( hipGetDeviceProperties(&deviceProp, device) );
if (deviceProp.major == 9999) {
cout << "Emulation device found." << endl;
return false;
}
cout << "CUDA device " << device << ": " << deviceProp.name << " (" << deviceProp.multiProcessorCount << " MPs)" << endl;
unsigned maxthreadspermp = deviceProp.maxThreadsPerMultiProcessor;
if (maxthreadspermp > MAX_CUDA_THREADS)
maxthreadspermp = (MAX_CUDA_THREADS/32)*32;
while (maxthreadspermp > deviceProp.regsPerMultiprocessor * REGISTERS_PER_CUDA_THREAD)
maxthreadspermp -= 32;
unsigned minblockspermp = 1;
while (maxthreadspermp > minblockspermp * deviceProp.maxThreadsPerBlock)
minblockspermp += 1;
while (maxthreadspermp * REGISTERS_PER_CUDA_THREAD > minblockspermp * deviceProp.regsPerBlock)
minblockspermp += 1;
detail->threadsperblock = ((maxthreadspermp / minblockspermp) / 32) * 32;
detail->blocks = minblockspermp * deviceProp.multiProcessorCount * 2;
cout << "Using " << detail->blocks << " blocks with " << detail->threadsperblock << " threads each: total " << detail->blocks * detail->threadsperblock << " threads." << endl;
CUDA_SAFE_CALL( hipSetDevice(device) );
CUDA_SAFE_CALL( hipSetDeviceFlags( hipDeviceScheduleBlockingSync ) );
CUDA_SAFE_CALL( hipHostMalloc( (void**)(&(detail->buffer_host)), detail->blocks * detail->threadsperblock * sizeof(trail_type) ) );
uint32 pc1[4], pc2[4];
uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3];
MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */
pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d;
a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3];
MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */
pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d;
CUDA_SAFE_CALL( hipMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(precomp2, pc2, sizeof(pc2)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) );
hipLaunchKernelGGL(( cuda_md5_init), dim3(detail->blocks), dim3(detail->threadsperblock), 0, 0, );
return true;
}
__global__ void cuda_md5_work(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
// __syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */
if (x <= y) {
a += ihv1[0];
b += ihv1[1];
c += ihv1[2];
d += ihv1[3];
} else {
a += ihv2mod[0];
b += ihv2mod[1];
c += ihv2mod[2];
d += ihv2mod[3];
}
x = a;
y = d - c;
z = (d - b) & hybridmask;
++len;
}
{
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ (threadIdx.x<<16) + len;
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
// __syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
__global__ void cuda_md5_workmod(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
// __syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
if (x <= y) {
x = a + ihv1[0];
y = d + ihv1[3];
z = (c + ihv1[2]) & hybridmask;
} else {
x = a + ihv2mod[0];
y = d + ihv2mod[3];
z = (c + ihv2mod[2]) & hybridmask;
}
++len;
}
{
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ (threadIdx.x<<16) + len;
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
// __syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed,
vector<trail_type>& buf,
vector< pair<trail_type,trail_type> >& collisions, bool mod)
{
// transfer results
hipMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*detail->blocks*detail->threadsperblock);
// start new cuda computation
if (!mod)
hipLaunchKernelGGL(( cuda_md5_work), dim3(detail->blocks), dim3(detail->threadsperblock), 0, 0, seed);
else
hipLaunchKernelGGL(( cuda_md5_workmod), dim3(detail->blocks), dim3(detail->threadsperblock), 0, 0, seed);
// process and return results
buf.clear();
for (unsigned i = 0; i < detail->blocks*detail->threadsperblock; ++i)
if (detail->buffer_host[i].len)
buf.push_back(detail->buffer_host[i]);
}
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
class timer_detail;
class timer {
public:
timer(bool direct_start = false);
~timer();
void start();
void stop();
double time() const;// get time between start and stop (or now if still running) in seconds
bool isrunning() const { return running; } // check if timer is running
private:
timer_detail* detail;
bool running;
};
class timer_detail {
public:
#ifdef _WIN32
LARGE_INTEGER tstart, tend;
double freq;
#else
struct timeval tstart, tend;
struct timezone tz;
#endif
};
timer::~timer()
{
delete detail;
}
timer::timer(bool direct_start): running(false)
{
detail = new timer_detail;
#ifdef _WIN32
LARGE_INTEGER tmp_freq;
QueryPerformanceFrequency(&tmp_freq);
detail->freq = double(tmp_freq.QuadPart);
#endif
if (direct_start)
start();
}
#ifdef _WIN32
void timer::start()
{
running = true;
QueryPerformanceCounter(&detail->tstart);
}
void timer::stop()
{
QueryPerformanceCounter(&detail->tend);
running = false;
}
double timer::time() const
{
if (running)
{
LARGE_INTEGER tmp_end;
QueryPerformanceCounter(&tmp_end);
return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
} else
return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
}
#else
void timer::start()
{
running = true;
gettimeofday(&detail->tstart, &detail->tz);
}
void timer::stop()
{
gettimeofday(&detail->tend, &detail->tz);
running = false;
}
double timer::time() const
{
double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6);
if (running)
{
struct timeval tmp_end;
gettimeofday(&tmp_end, &detail->tz);
return double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1;
} else
return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1;
}
#endif
void cuda_device::benchmark()
{
timer sw;
for (int blocksize = 4; blocksize <= 256; ++blocksize)
for (int threadsize = 250; threadsize <= 257; ++threadsize)
{
sw.start();
uint64 work = 0;
while (sw.time() < 10) {
hipLaunchKernelGGL(( cuda_md5_work), dim3(blocksize), dim3(threadsize), 0, 0, 0);
hipMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize);
++work;
}
uint64 ow = work;
work *= 0x400 * blocksize * threadsize;
cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl;
}
}
int get_num_cuda_devices()
{
int deviceCount = 0;
cutilSafeCall(hipGetDeviceCount(&deviceCount));
return deviceCount;
}
void cuda_device_query()
{
int deviceCount = 0;
cutilSafeCall(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
cutilSafeCall(hipGetDeviceProperties(&deviceProp, dev));
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
}
| 3b36c16a0a042f58e34009afe64f37e903ae5caf.cu | /**************************************************************************\
|
| Copyright (C) 2009 Marc Stevens
|
| This program is free software: you can redistribute it and/or modify
| it under the terms of the GNU General Public License as published by
| the Free Software Foundation, either version 3 of the License, or
| (at your option) any later version.
|
| This program is distributed in the hope that it will be useful,
| but WITHOUT ANY WARRANTY; without even the implied warranty of
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
| GNU General Public License for more details.
|
| You should have received a copy of the GNU General Public License
| along with this program. If not, see <http://www.gnu.org/licenses/>.
|
\**************************************************************************/
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdexcept>
#include <boost/cstdint.hpp>
using namespace std;
typedef boost::uint32_t uint32;
typedef boost::uint64_t uint64;
#define MAX_CUDA_THREADS (1<<20)
#define REGISTERS_PER_CUDA_THREAD 32
#define TRAIL_NOCONSTRUCTOR
#include "birthday_types.hpp"
#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(s) { auto ce = s; if (ce != cudaSuccess) { throw std::runtime_error("CUDA API Error:\n" + std::string(cudaGetErrorName(ce)) + ":\n" + std::string(cudaGetErrorString(ce))); } }
#endif
#ifndef cutilSafeCall
#define cutilSafeCall(s) (s)
#endif
class cuda_device_detail {
public:
uint32 device;
uint32 blocks;
uint32 threadsperblock;
trail_type* buffer_host;
};
/* We assume that these are _thread specific_ (instead of global) storage managed by the cuda realtime libraries */
__device__ trail_type working_states2[MAX_CUDA_THREADS];
__device__ trail_type buffer2[MAX_CUDA_THREADS];
__constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4];
__constant__ uint32 precomp1[4], precomp2[4];
__constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength;
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define MD5_H(x, y, z) ((x) ^ (y) ^ (z))
#define MD5_I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define MD5_FF(a, b, c, d, x, s, ac) \
{(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_GG(a, b, c, d, x, s, ac) \
{(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_HH(a, b, c, d, x, s, ac) \
{(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define MD5_II(a, b, c, d, x, s, ac) \
{(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__global__ void cuda_md5_init()
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
working_states2[idx].len = 0;
buffer2[idx].len = 0;
}
bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen)
{
detail = new cuda_device_detail;
detail->device = device;
int deviceCount;
CUDA_SAFE_CALL( cudaGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
cout << "There is no device supporting CUDA!" << endl;
return false;
}
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, device) );
if (deviceProp.major == 9999) {
cout << "Emulation device found." << endl;
return false;
}
cout << "CUDA device " << device << ": " << deviceProp.name << " (" << deviceProp.multiProcessorCount << " MPs)" << endl;
unsigned maxthreadspermp = deviceProp.maxThreadsPerMultiProcessor;
if (maxthreadspermp > MAX_CUDA_THREADS)
maxthreadspermp = (MAX_CUDA_THREADS/32)*32;
while (maxthreadspermp > deviceProp.regsPerMultiprocessor * REGISTERS_PER_CUDA_THREAD)
maxthreadspermp -= 32;
unsigned minblockspermp = 1;
while (maxthreadspermp > minblockspermp * deviceProp.maxThreadsPerBlock)
minblockspermp += 1;
while (maxthreadspermp * REGISTERS_PER_CUDA_THREAD > minblockspermp * deviceProp.regsPerBlock)
minblockspermp += 1;
detail->threadsperblock = ((maxthreadspermp / minblockspermp) / 32) * 32;
detail->blocks = minblockspermp * deviceProp.multiProcessorCount * 2;
cout << "Using " << detail->blocks << " blocks with " << detail->threadsperblock << " threads each: total " << detail->blocks * detail->threadsperblock << " threads." << endl;
CUDA_SAFE_CALL( cudaSetDevice(device) );
CUDA_SAFE_CALL( cudaSetDeviceFlags( cudaDeviceBlockingSync ) );
CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->buffer_host)), detail->blocks * detail->threadsperblock * sizeof(trail_type) ) );
uint32 pc1[4], pc2[4];
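// Precompute MD5 steps 1-13 for both messages on the host; the kernels resume at step 14 from these intermediate states.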
uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3];
MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */
pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d;
a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3];
MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */
MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */
MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */
MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */
MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */
MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */
MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */
MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */
MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */
MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */
MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */
MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */
MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */
pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d;
CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp2, pc2, sizeof(pc2)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) );
cuda_md5_init<<<detail->blocks, detail->threadsperblock>>>();
return true;
}
__global__ void cuda_md5_work(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
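// Start a fresh pseudo-random trail if the previous one exceeded the maximum path length or this thread has no trail yet.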
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
// __syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */
if (x <= y) {
a += ihv1[0];
b += ihv1[1];
c += ihv1[2];
d += ihv1[3];
} else {
a += ihv2mod[0];
b += ihv2mod[1];
c += ihv2mod[2];
d += ihv2mod[3];
}
x = a;
y = d - c;
z = (d - b) & hybridmask;
++len;
}
{
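// A distinguished point is reached when the masked bits of x are all zero: store the finished trail in the output buffer and reseed this thread's start point.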
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ (threadIdx.x<<16) + len;
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
// __syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
__global__ void cuda_md5_workmod(uint64 seed)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
buffer2[idx].len = 0;
uint32 len = working_states2[idx].len;
uint32 x = working_states2[idx].end[0];
uint32 y = working_states2[idx].end[1];
uint32 z = working_states2[idx].end[2];
if (len >= maximumpathlength || len == 0) {
x = uint32(seed>>32) ^ threadIdx.x;
y = uint32(seed) ^ blockIdx.x;
z = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
len = 0;
}
// __syncthreads();
for (unsigned j = 0; j < 0x100; ++j)
{
{
uint32* in = msg1;
uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3];
if (x > y) {
in = msg2;
a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3];
}
MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */
MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */
MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */
MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */
MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */
MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */
MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */
MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */
MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */
MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */
MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */
MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */
MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */
MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */
MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */
MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */
MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */
MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */
MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */
MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */
MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */
MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */
MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */
MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */
MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */
MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */
MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */
MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */
MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */
MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */
MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */
MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */
MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */
MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */
MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */
MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */
MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */
MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */
MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */
MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */
MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */
MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */
MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */
MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */
MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */
MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */
MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */
MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */
MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */
MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */
if (x <= y) {
x = a + ihv1[0];
y = d + ihv1[3];
z = (c + ihv1[2]) & hybridmask;
} else {
x = a + ihv2mod[0];
y = d + ihv2mod[3];
z = (c + ihv2mod[2]) & hybridmask;
}
++len;
}
{
if (0 == (x & distinguishedpointmask)) {
buffer2[idx].end[0] = x;
buffer2[idx].end[1] = y;
buffer2[idx].end[2] = z;
buffer2[idx].len = len;
buffer2[idx].start[0] = working_states2[idx].start[0];
buffer2[idx].start[1] = working_states2[idx].start[1];
buffer2[idx].start[2] = working_states2[idx].start[2];
x = uint32(seed>>32) ^ (threadIdx.x<<16) + len;
y = uint32(seed) ^ blockIdx.x;
z = 0;
len = 0;
working_states2[idx].start[0] = x;
working_states2[idx].start[1] = y;
working_states2[idx].start[2] = z;
}
}
// __syncthreads();
}
working_states2[idx].end[0] = x;
working_states2[idx].end[1] = y;
working_states2[idx].end[2] = z;
working_states2[idx].len = len;
}
void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed,
vector<trail_type>& buf,
vector< pair<trail_type,trail_type> >& collisions, bool mod)
{
// transfer results
cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*detail->blocks*detail->threadsperblock);
// start new cuda computation
if (!mod)
cuda_md5_work<<<detail->blocks, detail->threadsperblock>>>(seed);
else
cuda_md5_workmod<<<detail->blocks, detail->threadsperblock>>>(seed);
// process and return results
buf.clear();
for (unsigned i = 0; i < detail->blocks*detail->threadsperblock; ++i)
if (detail->buffer_host[i].len)
buf.push_back(detail->buffer_host[i]);
}
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
class timer_detail;
class timer {
public:
timer(bool direct_start = false);
~timer();
void start();
void stop();
double time() const;// get time between start and stop (or now if still running) in seconds
bool isrunning() const { return running; } // check if timer is running
private:
timer_detail* detail;
bool running;
};
class timer_detail {
public:
#ifdef _WIN32
LARGE_INTEGER tstart, tend;
double freq;
#else
struct timeval tstart, tend;
struct timezone tz;
#endif
};
timer::~timer()
{
delete detail;
}
timer::timer(bool direct_start): running(false)
{
detail = new timer_detail;
#ifdef _WIN32
LARGE_INTEGER tmp_freq;
QueryPerformanceFrequency(&tmp_freq);
detail->freq = double(tmp_freq.QuadPart);
#endif
if (direct_start)
start();
}
#ifdef _WIN32
void timer::start()
{
running = true;
QueryPerformanceCounter(&detail->tstart);
}
void timer::stop()
{
QueryPerformanceCounter(&detail->tend);
running = false;
}
double timer::time() const
{
if (running)
{
LARGE_INTEGER tmp_end;
QueryPerformanceCounter(&tmp_end);
return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
} else
return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq;
}
#else
void timer::start()
{
running = true;
gettimeofday(&detail->tstart, &detail->tz);
}
void timer::stop()
{
gettimeofday(&detail->tend, &detail->tz);
running = false;
}
double timer::time() const
{
double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6);
if (running)
{
struct timeval tmp_end;
gettimeofday(&tmp_end, &detail->tz);
return double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1;
} else
return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1;
}
#endif
void cuda_device::benchmark()
{
timer sw;
for (int blocksize = 4; blocksize <= 256; ++blocksize)
for (int threadsize = 250; threadsize <= 257; ++threadsize)
{
sw.start();
uint64 work = 0;
while (sw.time() < 10) {
cuda_md5_work<<<blocksize, threadsize>>>(0);
cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize);
++work;
}
uint64 ow = work;
work *= 0x400 * blocksize * threadsize;
cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl;
}
}
int get_num_cuda_devices()
{
int deviceCount = 0;
cutilSafeCall(cudaGetDeviceCount(&deviceCount));
return deviceCount;
}
void cuda_device_query()
{
int deviceCount = 0;
cutilSafeCall(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cutilSafeCall(cudaGetDeviceProperties(&deviceProp, dev));
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %u bytes\n",
deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
}
|
04e0a69437e1d8a60010ac003f8aac55d3002af3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
struct node{
int dst;
struct node* next;
};
struct list{
struct node *head;
};
struct graph{
int n;
struct list* set;
};
struct node* new_node(int dst){
struct node* newnode = (struct node*)malloc(sizeof(struct node));
newnode -> dst = dst;
newnode -> next = NULL;
return newnode;
}
struct graph* new_graph(int n){
struct graph* newgraph = (struct graph*)malloc(sizeof(struct graph));
newgraph -> n = n;
newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
int i;
for(i=0;i<n;i++)
newgraph->set[i].head = NULL;
return newgraph;
}
void addEdge(struct graph* gph, int src, int dst){
struct node* newnode = new_node(dst);
newnode->next = gph->set[src].head;
gph->set[src].head = newnode;
newnode = new_node(src);
newnode->next = gph->set[dst].head;
gph->set[dst].head = newnode;
}
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
c[tid] = a[tid] + b[tid];
}
}
long get_vert(char *str){
char vert[20];
int space_count = 0;
int num_vert=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 2){
vert[j] = str[i];
j++;
}
else if(space_count>2)
break;
i++;
}
vert[j] = '\0';
//printf("%s\n", vert);
num_vert = atoi(vert);
//printf("%d\n", num_vert);
return num_vert;
}
int get_src(char *str){
char s[20];
int space_count = 0;
int src=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 0){
s[j] = str[i];
j++;
}
else
break;
i++;
}
s[j] = '\0';
//printf("%s\n", s);
src = atoi(s);
//printf("%d\n", src);
return src;
}
int get_dst(char *str){
char d[20];
int space_count = 0;
int dst=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 1){
d[j] = str[i];
j++;
}
else if(space_count>1)
break;
i++;
}
d[j] = '\0';
//printf("%s\n", d);
dst = atoi(d);
//printf("%d\n", dst);
return dst;
}
int compare (const void * a, const void * b)
{
return ( *(int*)b - *(int*)a );
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
hipSetDevice(0);
// Time Variables
hipEvent_t start, stop;
float time;
hipEventCreate (&start);
hipEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
hipEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
int actual_size = vector_size * sizeof(int);
// allocate the memory on the GPU
hipMalloc(&dev_a,actual_size);
hipMalloc(&dev_b,actual_size);
hipMalloc(&dev_c,actual_size);
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy(dev_a,a,actual_size,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,actual_size,hipMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
hipEventRecord(start,0);
// call the kernel
//add<<<grid_size,block_size>>>(dev_a,dev_b,dev_c,vector_size);
hipLaunchKernelGGL(( add), dim3(vector_size),dim3(1), 0, 0, dev_a,dev_b,dev_c,vector_size);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
hipMemcpy(c_gpu,dev_c,actual_size,hipMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
delete [] a;
delete [] b;
delete [] c_cpu;
delete [] c_gpu;
// free the memory allocated on the GPU
// HERE
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 04e0a69437e1d8a60010ac003f8aac55d3002af3.cu | #include <stdio.h>
#include <stdlib.h>
struct node{
int dst;
struct node* next;
};
struct list{
struct node *head;
};
struct graph{
int n;
struct list* set;
};
struct node* new_node(int dst){
struct node* newnode = (struct node*)malloc(sizeof(struct node));
newnode -> dst = dst;
newnode -> next = NULL;
return newnode;
}
struct graph* new_graph(int n){
struct graph* newgraph = (struct graph*)malloc(sizeof(struct graph));
newgraph -> n = n;
newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
int i;
for(i=0;i<n;i++)
newgraph->set[i].head = NULL;
return newgraph;
}
void addEdge(struct graph* gph, int src, int dst){
struct node* newnode = new_node(dst);
newnode->next = gph->set[src].head;
gph->set[src].head = newnode;
newnode = new_node(src);
newnode->next = gph->set[dst].head;
gph->set[dst].head = newnode;
}
__global__ void add( int *a, int *b, int *c, int vector_size ) {
// Calculate the index in the vector for the thread using the internal variables
int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE
// This if statement is added in case we have more threads executing
// Than number of elements in the vectors. How can this help?
if (tid < vector_size){
// Compute the addition
c[tid] = a[tid] + b[tid];
}
}
long get_vert(char *str){
char vert[20];
int space_count = 0;
int num_vert=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 2){
vert[j] = str[i];
j++;
}
else if(space_count>2)
break;
i++;
}
vert[j] = '\0';
//printf("%s\n", vert);
num_vert = atoi(vert);
//printf("%d\n", num_vert);
return num_vert;
}
int get_src(char *str){
char s[20];
int space_count = 0;
int src=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 0){
s[j] = str[i];
j++;
}
else
break;
i++;
}
s[j] = '\0';
//printf("%s\n", s);
src = atoi(s);
//printf("%d\n", src);
return src;
}
int get_dst(char *str){
char d[20];
int space_count = 0;
int dst=0;
int i=0, j=0;
while(str[i] != '\n'){
if(str[i] == ' ')
space_count++;
if(space_count == 1){
d[j] = str[i];
j++;
}
else if(space_count>1)
break;
i++;
}
d[j] = '\0';
//printf("%s\n", d);
dst = atoi(d);
//printf("%d\n", dst);
return dst;
}
int compare (const void * a, const void * b)
{
return ( *(int*)b - *(int*)a );
}
int main( int argc, char* argv[] ) {
// Parse Input arguments
// Check the number of arguments
if (argc != 3) {
// Tell the user how to run the program
printf ("Usage: %s vector_size block_size\n", argv[0]);
// "Usage messages" are a conventional way of telling the user
// how to run a program if they enter the command incorrectly.
return 1;
}
// Set GPU Variables based on input arguments
int vector_size = atoi(argv[1]);
int block_size = atoi(argv[2]);
int grid_size = ((vector_size-1)/block_size) + 1;
// Set device that we will use for our cuda code
cudaSetDevice(0);
// Time Variables
cudaEvent_t start, stop;
float time;
cudaEventCreate (&start);
cudaEventCreate (&stop);
// Input Arrays and variables
int *a = new int [vector_size];
int *b = new int [vector_size];
int *c_cpu = new int [vector_size];
int *c_gpu = new int [vector_size];
// Pointers in GPU memory
int *dev_a;
int *dev_b;
int *dev_c;
// fill the arrays 'a' and 'b' on the CPU
printf("Initializing input arrays.\n");
for (int i = 0; i < vector_size; i++) {
a[i] = rand()%10;
b[i] = rand()%10;
}
//
// CPU Calculation
//////////////////
printf("Running sequential job.\n");
cudaEventRecord(start,0);
// Calculate C in the CPU
for (int i = 0; i < vector_size; i++) {
c_cpu[i] = a[i] + b[i];
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tSequential Job Time: %.2f ms\n", time);
int actual_size = vector_size * sizeof(int);
// allocate the memory on the GPU
cudaMalloc(&dev_a,actual_size);
cudaMalloc(&dev_b,actual_size);
cudaMalloc(&dev_c,actual_size);
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy(dev_a,a,actual_size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,actual_size,cudaMemcpyHostToDevice);
//
// GPU Calculation
////////////////////////
printf("Running parallel job.\n");
cudaEventRecord(start,0);
// call the kernel
//add<<<grid_size,block_size>>>(dev_a,dev_b,dev_c,vector_size);
add<<<vector_size,1>>>(dev_a,dev_b,dev_c,vector_size);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\tParallel Job Time: %.2f ms\n", time);
// copy the array 'c' back from the GPU to the CPU
// HERE (there's one more at the end, don't miss it!)
cudaMemcpy(c_gpu,dev_c,actual_size,cudaMemcpyDeviceToHost);
// compare the results
int error = 0;
for (int i = 0; i < vector_size; i++) {
if (c_cpu[i] != c_gpu[i]){
error = 1;
printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
}
if (error) break;
}
if (error == 0){
printf ("Correct result. No errors were found.\n");
}
// free CPU data
delete [] a;
delete [] b;
delete [] c_cpu;
delete [] c_gpu;
// free the memory allocated on the GPU
// HERE
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
a2e7b8b8605d7bfa4ead89da96f8bdc89aa3307c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "intersection_algorithm_in_parallel.hpp"
#define BLOCKSIZE 64
__global__ void cuda_kernel1_calculate_intervals(
distance_t *results_p,
coordinate_t *points_curve1_p,
coordinate_t *points_curve2_p,
curve_size_t *curve1_size_p,
curve_size_t *curve2_size_p,
curve_size_t *point_dimensions_p,
distance_t *radius_p,
bool *is_last,
distance_t eps
);
Cuda_intersection::Cuda_intersection(const Curve& curve1,const Curve& curve2, distance_t *host_res_p, distance_t eps) : eps{eps} {
hipError_t cudaStatus;
host_results_p = host_res_p;
point_dimensions = curve1[0].coordinates.size();
points_curve1_p = (coordinate_t*)malloc(sizeof(coordinate_t)*point_dimensions*curve1.size());
points_curve2_p = (coordinate_t*)malloc(sizeof(coordinate_t)*point_dimensions*curve2.size());
//If we have the opportunity to use multiple gpus then we will split curve2 onto the gpus, but give each gpu
//the complete first curve.
cudaStatus = hipGetDeviceCount(&number_devices);
if(cudaStatus != hipSuccess){
std::cerr << "cudaGetDeviceCout failed! Do you have CUDA-capable GPU installed?" << std::endl;
goto Error;
}
//If splitting curve2 is not reasonable, do not do it
if(curve2.size() <= 2*number_devices){
number_devices = 1;
}
dev_radius_p.reserve(number_devices);
dev_points_curve1_p.reserve(number_devices);
dev_points_curve2_p.reserve(number_devices);
dev_curve1_size_p.reserve(number_devices);
dev_curve2_size_p.reserve(number_devices);
dev_point_dimensions_p.reserve(number_devices);
dev_results_p.reserve(number_devices);
dev_is_last_kernel_p.reserve(number_devices);
curve1_size.reserve(number_devices);
curve2_size.reserve(number_devices);
curve1_start_index.reserve(number_devices);
curve2_start_index.reserve(number_devices);
curve1_end_index.reserve(number_devices);
curve2_end_index.reserve(number_devices);
curve1_start_index[0] = 0;
curve2_start_index[0] = 0;
if(number_devices != 1){
curve1_end_index[0] = curve1.size()-1;
if(ceil(curve2.size() / number_devices) > curve2.size()-1){
curve2_end_index[0] = curve2.size() -1;
}else{
curve2_end_index[0] = ceil(curve2.size() / number_devices);
}
}else{
curve1_end_index[0] = curve1.size() - 1;
curve2_end_index[0] = curve2.size() - 1;
}
curve1_size[0] = curve1_end_index[0]-curve1_start_index[0]+1;
curve2_size[0] = curve2_end_index[0]-curve2_start_index[0]+1;
//Fill the points:
for( curve_size_t i = 0; i < curve1.size(); i++){
for( curve_size_t j = 0; j < point_dimensions; j++){
points_curve1_p[i*point_dimensions + j] = curve1[i].coordinates[j];
}
}
for( curve_size_t i = 0; i < curve2.size(); i++){
for( curve_size_t j = 0; j < point_dimensions; j++){
points_curve2_p[i*point_dimensions + j] = curve2[i].coordinates[j];
}
}
for(short device_nbr = 1; device_nbr < number_devices; device_nbr++){
curve1_start_index[device_nbr] = 0;
curve2_start_index[device_nbr] = curve2_end_index[device_nbr-1] - 1;
curve1_end_index[device_nbr] = curve1.size() - 1;
if(device_nbr != number_devices - 1){
curve2_end_index[device_nbr] = curve2_start_index[device_nbr-1] + ceil(curve2.size() / number_devices);
}else{
curve2_end_index[device_nbr] = curve2.size() - 1;
}
curve1_size[device_nbr] = curve1_end_index[device_nbr]-curve1_start_index[device_nbr]+1;
curve2_size[device_nbr] = curve2_end_index[device_nbr]-curve2_start_index[device_nbr]+1;
}
#if DEBUG
std::cout << "NUMBER DEVICES: " << number_devices << std::endl;
#endif
for(short device_nbr = 0; device_nbr < number_devices; device_nbr++){
//Choose which GPU to run on, change this on a multi-GPU system if needed!
cudaStatus = hipSetDevice(device_nbr);
if(cudaStatus != hipSuccess){
std::cerr << "CUDASetDevice failed! Do you have CUDA-capable GPU installed?" << std::endl;
}
//Allocate buffers on the GPU
cudaStatus = hipMalloc((void**)&dev_results_p[device_nbr], sizeof(distance_t)*2*(2 * curve1_size[device_nbr] * curve2_size[device_nbr]));
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_results failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_points_curve1_p[device_nbr], sizeof(coordinate_t)* point_dimensions * curve1_size[device_nbr]);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_points_curve1 failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_points_curve2_p[device_nbr], sizeof(coordinate_t)* point_dimensions * curve2_size[device_nbr]);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_points_curve2 failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_curve1_size_p[device_nbr], sizeof(curve_size_t));
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_curve1_size_p failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_curve2_size_p[device_nbr], sizeof(curve_size_t));
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_curve2_size_p failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_point_dimensions_p[device_nbr], sizeof(curve_size_t));
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_point_dimensions_p failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_radius_p[device_nbr], sizeof(distance_t));
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_radius failed!" << std::endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_is_last_kernel_p[device_nbr], sizeof(bool));
if(cudaStatus != hipSuccess){
std::cerr << "CudaMalloc dev_is_last_kernel_p failed!" << std::endl;
goto Error;
}
//Copy Data into device memory
cudaStatus = hipMemcpy(dev_points_curve1_p[device_nbr], &points_curve1_p[curve1_start_index[device_nbr]], sizeof(coordinate_t)* point_dimensions * (curve1_end_index[device_nbr]-curve1_start_index[device_nbr]+1), hipMemcpyHostToDevice );
if (cudaStatus != hipSuccess ){
std::cerr << "CudaMemcpy curve1_points to dev_points_curve1_p failed!" << std::endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_points_curve2_p[device_nbr], &points_curve2_p[curve2_start_index[device_nbr]], sizeof(coordinate_t)* point_dimensions * (curve2_end_index[device_nbr]-curve2_start_index[device_nbr]+1), hipMemcpyHostToDevice );
if (cudaStatus != hipSuccess ){
std::cerr << "CudaMemcpy curve2_points to dev_points_curve2_p failed!" << std::endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_curve1_size_p[device_nbr], &curve1_size[device_nbr], sizeof(curve_size_t), hipMemcpyHostToDevice );
if (cudaStatus != hipSuccess ){
std::cerr << "CudaMemcpy curve.size() host to device failed!" << std::endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_curve2_size_p[device_nbr], &curve2_size[device_nbr], sizeof(curve_size_t), hipMemcpyHostToDevice );
if (cudaStatus != hipSuccess ){
std::cerr << "CudaMemcpy curve2.size() host to device failed!" << std::endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_point_dimensions_p[device_nbr], &point_dimensions, sizeof(curve_size_t), hipMemcpyHostToDevice );
if (cudaStatus != hipSuccess ){
std::cerr << "CudaMemcpy point_dimensions host to device failed!" << std::endl;
goto Error;
}
if(device_nbr == number_devices-1){
cudaStatus = hipMemcpy(dev_is_last_kernel_p[device_nbr], &is_last ,sizeof(bool), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMemcpy dev_is_last_kernel_p failed!" << std::endl;
goto Error;
}
}else{
cudaStatus = hipMemcpy(dev_is_last_kernel_p[device_nbr], &is_not_last ,sizeof(bool), hipMemcpyHostToDevice);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMemcpy dev_is_last_kernel_p failed!" << std::endl;
goto Error;
}
}
}
goto NoError;
Error:
free_memory();
NoError:
return;
}
void Cuda_intersection::free_memory(){
if(not is_buffers_free){
for(short device_nbr = 0; device_nbr < number_devices; device_nbr++){
hipFree(dev_results_p[device_nbr]);
hipFree(dev_points_curve1_p[device_nbr]);
hipFree(dev_points_curve2_p[device_nbr]);
hipFree(dev_radius_p[device_nbr]);
hipFree(dev_curve1_size_p[device_nbr]);
hipFree(dev_curve2_size_p[device_nbr]);
hipFree(dev_point_dimensions_p[device_nbr]);
hipFree(dev_is_last_kernel_p[device_nbr]);
}
free(points_curve1_p);
free(points_curve2_p);
is_buffers_free = true;
}
}
void Cuda_intersection::intersection_interval_cuda(
distance_t radius
){
hipError_t cudaStatus = intersection_interval_call_gpu(radius);
if(cudaStatus != hipSuccess){
std::cerr << "intersection_interval_call_gpu failed!" << std::endl;
}
}
__global__ void cuda_kernel1_calculate_intervals(
distance_t *results_p,
coordinate_t *points_curve1_p,
coordinate_t *points_curve2_p,
curve_size_t *curve1_size_p,
curve_size_t *curve2_size_p,
curve_size_t *point_dimensions_p,
distance_t *radius_p,
bool *is_last,
distance_t eps
){
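// Each thread handles one (curve1 vertex, curve2 vertex) pair and computes where the edge starting at that vertex intersects the ball of the given radius around the other curve's vertex.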
thread_id_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
curve_size_t curve1_index = (curve_size_t)(thread_id % (*curve1_size_p));
curve_size_t curve2_index = (curve_size_t)(thread_id / (*curve1_size_p));
if(curve1_index >= *curve1_size_p or curve2_index >= *curve2_size_p){
return;
}
if(curve1_index == *curve1_size_p - 1 and not *is_last){
return;
}
if(curve2_index == *curve2_size_p - 1 and not *is_last){
return;
}
coordinate_t *circle_center_p;
coordinate_t *line_start_p;
coordinate_t *line_end_p;
//If neither of these flags gets set, the interval does not need to be calculated
bool need_to_calculate_v1 = false;
bool need_to_calculate_v2 = false;
short reps = 0;
if(curve1_index < *curve1_size_p -1 and curve2_index > 0){
need_to_calculate_v1 = true;
}
if(curve2_index < *curve2_size_p -1 and curve1_index > 0){
need_to_calculate_v2 = true;
}
if(need_to_calculate_v1 and need_to_calculate_v2){
reps = 2;
}else if(need_to_calculate_v1){
reps = 1;
}else if(need_to_calculate_v2){
reps = 1;
}
//Do the intersection algorithm if needed
short current_repetition = 1;
while(current_repetition <= reps){
if(need_to_calculate_v1){
need_to_calculate_v1 = false;
circle_center_p = &points_curve2_p[curve2_index * *point_dimensions_p];
line_start_p = &points_curve1_p[curve1_index * *point_dimensions_p];
line_end_p = &points_curve1_p[(curve1_index+1) * *point_dimensions_p];
}
else if(need_to_calculate_v2){
need_to_calculate_v2 = false;
circle_center_p = &points_curve1_p[curve1_index * *point_dimensions_p];
line_start_p = &points_curve2_p[curve2_index * *point_dimensions_p];
line_end_p = &points_curve2_p[(curve2_index+1) * *point_dimensions_p];
}else {
return;
}
//const distance_t eps = 0.001 / 4;
const distance_t save_eps = 0.5 * eps;
const distance_t save_eps_half = 0.25 * eps;
distance_t radius_sqr = *radius_p * *radius_p;
distance_t dist_a = 0;
distance_t dist_b = 0;
distance_t dist_c = - radius_sqr;
distance_t mid;
distance_t discriminant;
distance_t sqrt_discr = 0.;
distance_t begin, end;
bool smallDistAtZero;
bool smallDistAtOne;
bool smallDistAtMid;
bool sqrt_discr_computed = false;
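// Accumulate the coefficients of the quadratic |line_start + t*(line_end - line_start) - circle_center|^2 = radius^2 over all coordinates.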
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
coordinate_t end_minus_start_coordinate = line_end_p[coordinate] - line_start_p[coordinate];
dist_a += end_minus_start_coordinate * end_minus_start_coordinate;
dist_b += (line_start_p[coordinate] - circle_center_p[coordinate]) * end_minus_start_coordinate;
dist_c += powf(line_start_p[coordinate] - circle_center_p[coordinate], 2);
}
mid = - dist_b / dist_a;
discriminant = mid * mid - dist_c / dist_a;
distance_t circle_center_dist0_sqr = 0;
distance_t circle_center_dist1_sqr = 0;
distance_t circle_center_dist_mid_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist0 = line_start_p[coordinate] * 1.;
distance_t coordinate_for_dist1 = line_end_p[coordinate] * 1.;
distance_t coordinate_for_dist_mid = line_start_p[coordinate] * (1. - mid) + line_end_p[coordinate] * mid;
circle_center_dist0_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist0, 2);
circle_center_dist1_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist1, 2);
circle_center_dist_mid_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_mid, 2);
if(coordinate == *point_dimensions_p - 1){
smallDistAtZero = circle_center_dist0_sqr <= radius_sqr;
smallDistAtOne = circle_center_dist1_sqr <= radius_sqr;
smallDistAtMid = circle_center_dist_mid_sqr <= radius_sqr;
}
}
if(smallDistAtZero and smallDistAtOne){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 0;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 1;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = 0;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index)+1] = 1;
current_repetition++;
continue;
}
}
if(not smallDistAtMid and smallDistAtZero){
mid = 0.;
smallDistAtMid = true;
}else if(not smallDistAtMid and smallDistAtOne){
mid = 1.;
smallDistAtMid = true;
}
if(not smallDistAtMid){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 0.;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )+1] = 0.;
current_repetition++;
continue;
}
}
if(mid <= 0. and not smallDistAtZero){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 0.;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )+1] = 0.;
current_repetition++;
continue;
}
}
if(mid >= 1. and not smallDistAtOne){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 0.;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index)] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index)+1] = 0.;
current_repetition++;
continue;
}
}
if(discriminant < 0.){
discriminant = 0.;
}
sqrt_discr = 0.;
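// Left interval endpoint: 0 if the segment start is already inside the ball, otherwise a point near the smaller root (lambda1), refined by bisection if the closed-form candidate is not reliable.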
if(smallDistAtZero){
begin = 0.;
}else{
sqrt_discr = (distance_t)sqrtf(discriminant);
sqrt_discr_computed = true;
const distance_t lambda1 = mid - sqrt_discr;
const distance_t outershift = lambda1 - save_eps_half;
distance_t innershift;
if(1. < mid){
if(lambda1 + save_eps_half< 1.){
innershift = lambda1 + save_eps_half;
}else{
innershift = 1.;
}
}else{
if(lambda1 + save_eps_half <mid){
innershift = lambda1 + save_eps_half;
}else{
innershift = mid;
}
}
bool small_dist_at_innershift;
bool small_dist_at_outershift;
distance_t circle_center_dist_innershift_sqr = 0;
distance_t circle_center_dist1_outershift_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_innershift = line_start_p[coordinate] * (1. - innershift) + line_end_p[coordinate] * innershift;
distance_t coordinate_for_dist1_outershift = line_start_p[coordinate] * (1. - outershift) + line_end_p[coordinate] * outershift;
circle_center_dist_innershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_innershift, 2);
circle_center_dist1_outershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist1_outershift, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_innershift = circle_center_dist_innershift_sqr <= radius_sqr;
small_dist_at_outershift = circle_center_dist1_outershift_sqr <= radius_sqr;
}
}
if(innershift >= outershift and small_dist_at_innershift and not small_dist_at_outershift){
begin = innershift;
}else{
distance_t left = 0., right;
if(mid < 1.){
right = mid;
}else{
right = 1;
}
distance_t m = 0.5 * (left + right);
while(right - left > save_eps){
m = 0.5 * (left + right);
bool small_dist_at_m;
distance_t circle_center_dist_m_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_m = line_start_p[coordinate] * (1. - m) + line_end_p[coordinate] * m;
circle_center_dist_m_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_m, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_m = circle_center_dist_m_sqr <= radius_sqr;
}
}
if(small_dist_at_m){
right = m;
}else{
left = m;
}
}
begin = right;
}
}
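// Right interval endpoint: 1 if the segment end is inside the ball, otherwise a point near the larger root (lambda2), again falling back to bisection.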
if(smallDistAtOne){
end = 1.;
}else{
if(not sqrt_discr_computed){
sqrt_discr = sqrtf(discriminant);
}
const distance_t lambda2 = mid + sqrt_discr;
const distance_t outershift = lambda2 + save_eps_half;
distance_t innershift;
if(0 > mid){
if( 0 > lambda2 - save_eps_half){
innershift = 0;
}else{
innershift = lambda2 - save_eps_half;
}
}else{
if( mid > lambda2 - save_eps_half){
innershift = mid;
}else{
innershift = lambda2 - save_eps_half;
}
}
bool small_dist_at_innershift;
bool small_dist_at_outershift;
distance_t circle_center_dist_innershift_sqr = 0;
distance_t circle_center_dist1_outershift_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_innershift = line_start_p[coordinate] * (1. - innershift) + line_end_p[coordinate] * innershift;
distance_t coordinate_for_dist1_outershift = line_start_p[coordinate] * (1. - outershift) + line_end_p[coordinate] * outershift;
circle_center_dist_innershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_innershift, 2);
circle_center_dist1_outershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist1_outershift, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_innershift = circle_center_dist_innershift_sqr <= radius_sqr;
small_dist_at_outershift = circle_center_dist1_outershift_sqr <= radius_sqr;
}
}
if(innershift <= outershift and small_dist_at_innershift and not small_dist_at_outershift){
end = innershift;
}else{
distance_t left, right = 1.;
if(mid > 0.){
left = mid;
}else{
left = 0.;
}
distance_t m = 0.5 * (left + right);
while(right - left > save_eps){
m = 0.5 * (left + right);
bool small_dist_at_m;
distance_t circle_center_dist_m_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_m = line_start_p[coordinate] * (1. - m) + line_end_p[coordinate] * m;
circle_center_dist_m_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_m, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_m = circle_center_dist_m_sqr <= radius_sqr;
}
}
if(small_dist_at_m){
left = m;
}else{
right = m;
}
}
end = left;
}
}
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = begin;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = end;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = begin;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )+1] = end;
}
current_repetition++;
}
}
hipError_t Cuda_intersection::intersection_interval_call_gpu(
distance_t radius
){
hipError_t cudaStatus_global = hipSuccess;
for(int device_nbr = 0; device_nbr < number_devices; device_nbr++){
//Cuda launching utils
hipError_t cudaStatus;
curve_size_t number_of_threads = curve1_size[device_nbr] * curve2_size[device_nbr];
curve_size_t number_of_blocks = 0;
cudaStatus = hipSetDevice(device_nbr);
if(cudaStatus != hipSuccess){
std::cerr << "CUDASetDevice failed! Do you have CUDA-capable GPU installed?" << std::endl;
}
cudaStatus = hipMemcpy(dev_radius_p[device_nbr], &radius, sizeof(distance_t), hipMemcpyHostToDevice );
if (cudaStatus != hipSuccess ){
std::cerr << "CudaMemcpy radius to dev_radius_p failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
if((number_of_threads%BLOCKSIZE) == 0){
number_of_blocks = number_of_threads / BLOCKSIZE;
}else{
number_of_blocks = ((curve_size_t)(number_of_threads/ BLOCKSIZE)) + 1;
}
hipLaunchKernelGGL(( cuda_kernel1_calculate_intervals) , dim3(number_of_blocks), dim3(BLOCKSIZE) , 0, 0, dev_results_p[device_nbr],
dev_points_curve1_p[device_nbr],
dev_points_curve2_p[device_nbr],
dev_curve1_size_p[device_nbr],
dev_curve2_size_p[device_nbr],
dev_point_dimensions_p[device_nbr],
dev_radius_p[device_nbr],
dev_is_last_kernel_p[device_nbr],
eps
);
}
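// Second pass: synchronize each device and copy its slice of the interval results back into the host result buffer.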
for(int device_nbr = 0; device_nbr < number_devices; device_nbr++){
hipError_t cudaStatus;
cudaStatus = hipSetDevice(device_nbr);
if(cudaStatus != hipSuccess){
std::cerr << "CUDASetDevice failed! Do you have CUDA-capable GPU installed?" << std::endl;
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess){
std::cerr << "CudaGetLastError returned error code: " << cudaStatus << " after launching kernel!" << std::endl;
std::cerr << hipGetErrorString(cudaStatus) << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
// Waits until all threads are done with their job.
cudaStatus = hipDeviceSynchronize();
if( cudaStatus != hipSuccess){
std::cerr << "CudaDeviceSynchronize() returned error code: " << cudaStatus << " after launching kernel!" << std::endl;
std::cerr << hipGetErrorString(cudaStatus) << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
if(device_nbr == number_devices - 1){
cudaStatus = hipMemcpy(
host_results_p + 2* (curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr], sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
hipMemcpyDeviceToHost
);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
cudaStatus = hipMemcpy(
host_results_p +2* (curve1_end_index[number_devices-1]+1)*(curve2_end_index[number_devices-1]+1) +2*(curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr] + 2*(curve1_size[device_nbr])*(curve2_size[device_nbr]),
sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
hipMemcpyDeviceToHost
);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
}else{
cudaStatus = hipMemcpy(
host_results_p + 2*(curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr],
sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
hipMemcpyDeviceToHost
);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
cudaStatus = hipMemcpy(
host_results_p + 2*(curve1_end_index[number_devices-1]+1)*(curve2_end_index[number_devices-1]+1) +2*(curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr] + 2*(curve1_size[device_nbr])*(curve2_size[device_nbr]),
sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
hipMemcpyDeviceToHost
);
if(cudaStatus != hipSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
}
}
goto Regular_finish;
//Free the memory on Error
Error:
free_memory();
return cudaStatus_global;
Regular_finish:
return cudaStatus_global;
}
| a2e7b8b8605d7bfa4ead89da96f8bdc89aa3307c.cu | #include <math.h>
#include "intersection_algorithm_in_parallel.hpp"
#define BLOCKSIZE 64
__global__ void cuda_kernel1_calculate_intervals(
distance_t *results_p,
coordinate_t *points_curve1_p,
coordinate_t *points_curve2_p,
curve_size_t *curve1_size_p,
curve_size_t *curve2_size_p,
curve_size_t *point_dimensions_p,
distance_t *radius_p,
bool *is_last,
distance_t eps
);
Cuda_intersection::Cuda_intersection(const Curve& curve1,const Curve& curve2, distance_t *host_res_p, distance_t eps) : eps{eps} {
cudaError_t cudaStatus;
host_results_p = host_res_p;
point_dimensions = curve1[0].coordinates.size();
points_curve1_p = (coordinate_t*)malloc(sizeof(coordinate_t)*point_dimensions*curve1.size());
points_curve2_p = (coordinate_t*)malloc(sizeof(coordinate_t)*point_dimensions*curve2.size());
//If we have the opportunity to use multiple gpus then we will split curve2 onto the gpus, but give each gpu
//the complete first curve.
cudaStatus = cudaGetDeviceCount(&number_devices);
if(cudaStatus != cudaSuccess){
std::cerr << "cudaGetDeviceCout failed! Do you have CUDA-capable GPU installed?" << std::endl;
goto Error;
}
//If splitting curve2 is not reasonable, do not do it
if(curve2.size() <= 2*number_devices){
number_devices = 1;
}
dev_radius_p.reserve(number_devices);
dev_points_curve1_p.reserve(number_devices);
dev_points_curve2_p.reserve(number_devices);
dev_curve1_size_p.reserve(number_devices);
dev_curve2_size_p.reserve(number_devices);
dev_point_dimensions_p.reserve(number_devices);
dev_results_p.reserve(number_devices);
dev_is_last_kernel_p.reserve(number_devices);
curve1_size.reserve(number_devices);
curve2_size.reserve(number_devices);
curve1_start_index.reserve(number_devices);
curve2_start_index.reserve(number_devices);
curve1_end_index.reserve(number_devices);
curve2_end_index.reserve(number_devices);
curve1_start_index[0] = 0;
curve2_start_index[0] = 0;
if(number_devices != 1){
curve1_end_index[0] = curve1.size()-1;
if(ceil(curve2.size() / (double)number_devices) > curve2.size() - 1){
curve2_end_index[0] = curve2.size() -1;
}else{
curve2_end_index[0] = ceil(curve2.size() / (double)number_devices);
}
}else{
curve1_end_index[0] = curve1.size() - 1;
curve2_end_index[0] = curve2.size() - 1;
}
curve1_size[0] = curve1_end_index[0]-curve1_start_index[0]+1;
curve2_size[0] = curve2_end_index[0]-curve2_start_index[0]+1;
//Fill the points:
for( curve_size_t i = 0; i < curve1.size(); i++){
for( curve_size_t j = 0; j < point_dimensions; j++){
points_curve1_p[i*point_dimensions + j] = curve1[i].coordinates[j];
}
}
for( curve_size_t i = 0; i < curve2.size(); i++){
for( curve_size_t j = 0; j < point_dimensions; j++){
points_curve2_p[i*point_dimensions + j] = curve2[i].coordinates[j];
}
}
for(short device_nbr = 1; device_nbr < number_devices; device_nbr++){
curve1_start_index[device_nbr] = 0;
curve2_start_index[device_nbr] = curve2_end_index[device_nbr-1] - 1;
curve1_end_index[device_nbr] = curve1.size() - 1;
if(device_nbr != number_devices - 1){
curve2_end_index[device_nbr] = curve2_start_index[device_nbr-1] + ceil(curve2.size() / (double)number_devices);
}else{
curve2_end_index[device_nbr] = curve2.size() - 1;
}
curve1_size[device_nbr] = curve1_end_index[device_nbr]-curve1_start_index[device_nbr]+1;
curve2_size[device_nbr] = curve2_end_index[device_nbr]-curve2_start_index[device_nbr]+1;
}
#if DEBUG
std::cout << "NUMBER DEVICES: " << number_devices << std::endl;
#endif
for(short device_nbr = 0; device_nbr < number_devices; device_nbr++){
//Choose which GPU to run on, change this on a multi-GPU system if needed!
cudaStatus = cudaSetDevice(device_nbr);
if(cudaStatus != cudaSuccess){
std::cerr << "CUDASetDevice failed! Do you have CUDA-capable GPU installed?" << std::endl;
}
//Allocate buffers on the GPU
cudaStatus = cudaMalloc((void**)&dev_results_p[device_nbr], sizeof(distance_t)*2*(2 * curve1_size[device_nbr] * curve2_size[device_nbr]));
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_results failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_points_curve1_p[device_nbr], sizeof(coordinate_t)* point_dimensions * curve1_size[device_nbr]);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_points_curve1 failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_points_curve2_p[device_nbr], sizeof(coordinate_t)* point_dimensions * curve2_size[device_nbr]);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_points_curve2 failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_curve1_size_p[device_nbr], sizeof(curve_size_t));
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_curve1_size_p failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_curve2_size_p[device_nbr], sizeof(curve_size_t));
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_curve2_size_p failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_point_dimensions_p[device_nbr], sizeof(curve_size_t));
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_point_dimensions_p failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_radius_p[device_nbr], sizeof(distance_t));
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_radius failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_is_last_kernel_p[device_nbr], sizeof(bool));
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMalloc dev_is_last_kernel_p failed!" << std::endl;
goto Error;
}
//Copy Data into device memory
cudaStatus = cudaMemcpy(dev_points_curve1_p[device_nbr], &points_curve1_p[curve1_start_index[device_nbr]], sizeof(coordinate_t)* point_dimensions * (curve1_end_index[device_nbr]-curve1_start_index[device_nbr]+1), cudaMemcpyHostToDevice );
if (cudaStatus != cudaSuccess ){
std::cerr << "CudaMemcpy curve1_points to dev_points_curve1_p failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_points_curve2_p[device_nbr], &points_curve2_p[curve2_start_index[device_nbr]], sizeof(coordinate_t)* point_dimensions * (curve2_end_index[device_nbr]-curve2_start_index[device_nbr]+1), cudaMemcpyHostToDevice );
if (cudaStatus != cudaSuccess ){
std::cerr << "CudaMemcpy curve2_points to dev_points_curve2_p failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_curve1_size_p[device_nbr], &curve1_size[device_nbr], sizeof(curve_size_t), cudaMemcpyHostToDevice );
if (cudaStatus != cudaSuccess ){
std::cerr << "CudaMemcpy curve.size() host to device failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_curve2_size_p[device_nbr], &curve2_size[device_nbr], sizeof(curve_size_t), cudaMemcpyHostToDevice );
if (cudaStatus != cudaSuccess ){
std::cerr << "CudaMemcpy curve2.size() host to device failed!" << std::endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_point_dimensions_p[device_nbr], &point_dimensions, sizeof(curve_size_t), cudaMemcpyHostToDevice );
if (cudaStatus != cudaSuccess ){
std::cerr << "CudaMemcpy point_dimensions host to device failed!" << std::endl;
goto Error;
}
if(device_nbr == number_devices-1){
cudaStatus = cudaMemcpy(dev_is_last_kernel_p[device_nbr], &is_last ,sizeof(bool), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMemcpy dev_is_last_kernel_p failed!" << std::endl;
goto Error;
}
}else{
cudaStatus = cudaMemcpy(dev_is_last_kernel_p[device_nbr], &is_not_last ,sizeof(bool), cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMemcpy dev_is_last_kernel_p failed!" << std::endl;
goto Error;
}
}
}
goto NoError;
Error:
free_memory();
NoError:
return;
}
void Cuda_intersection::free_memory(){
if(not is_buffers_free){
for(short device_nbr = 0; device_nbr < number_devices; device_nbr++){
cudaFree(dev_results_p[device_nbr]);
cudaFree(dev_points_curve1_p[device_nbr]);
cudaFree(dev_points_curve2_p[device_nbr]);
cudaFree(dev_radius_p[device_nbr]);
cudaFree(dev_curve1_size_p[device_nbr]);
cudaFree(dev_curve2_size_p[device_nbr]);
cudaFree(dev_point_dimensions_p[device_nbr]);
cudaFree(dev_is_last_kernel_p[device_nbr]);
}
free(points_curve1_p);
free(points_curve2_p);
is_buffers_free = true;
}
}
void Cuda_intersection::intersection_interval_cuda(
distance_t radius
){
cudaError_t cudaStatus = intersection_interval_call_gpu(radius);
if(cudaStatus != cudaSuccess){
std::cerr << "intersection_interval_call_gpu failed!" << std::endl;
}
}
__global__ void cuda_kernel1_calculate_intervals(
distance_t *results_p,
coordinate_t *points_curve1_p,
coordinate_t *points_curve2_p,
curve_size_t *curve1_size_p,
curve_size_t *curve2_size_p,
curve_size_t *point_dimensions_p,
distance_t *radius_p,
bool *is_last,
distance_t eps
){
thread_id_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
curve_size_t curve1_index = (curve_size_t)(thread_id % (*curve1_size_p));
curve_size_t curve2_index = (curve_size_t)(thread_id / (*curve1_size_p));
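//Thread-to-cell mapping (illustration): with *curve1_size_p == 4, thread_id == 9 gives
//curve1_index == 1 and curve2_index == 2, i.e. a row-major walk over the curve1 x curve2 grid of cells.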
if(curve1_index >= *curve1_size_p or curve2_index >= *curve2_size_p){
return;
}
if(curve1_index == *curve1_size_p - 1 and not *is_last){
return;
}
if(curve2_index == *curve2_size_p - 1 and not *is_last){
return;
}
coordinate_t *circle_center_p;
coordinate_t *line_start_p;
coordinate_t *line_end_p;
//If this is not getting set then the interval does not have to be calculated
bool need_to_calculate_v1 = false;
bool need_to_calculate_v2 = false;
short reps = 0;
if(curve1_index < *curve1_size_p -1 and curve2_index > 0){
need_to_calculate_v1 = true;
}
if(curve2_index < *curve2_size_p -1 and curve1_index > 0){
need_to_calculate_v2 = true;
}
if(need_to_calculate_v1 and need_to_calculate_v2){
reps = 2;
}else if(need_to_calculate_v1){
reps = 1;
}else if(need_to_calculate_v2){
reps = 1;
}
//Do the intersection algorithm if needed
short current_repetition = 1;
while(current_repetition <= reps){
if(need_to_calculate_v1){
need_to_calculate_v1 = false;
circle_center_p = &points_curve2_p[curve2_index * *point_dimensions_p];
line_start_p = &points_curve1_p[curve1_index * *point_dimensions_p];
line_end_p = &points_curve1_p[(curve1_index+1) * *point_dimensions_p];
}
else if(need_to_calculate_v2){
need_to_calculate_v2 = false;
circle_center_p = &points_curve1_p[curve1_index * *point_dimensions_p];
line_start_p = &points_curve2_p[curve2_index * *point_dimensions_p];
line_end_p = &points_curve2_p[(curve2_index+1) * *point_dimensions_p];
}else {
return;
}
//const distance_t eps = 0.001 / 4;
const distance_t save_eps = 0.5 * eps;
const distance_t save_eps_half = 0.25 * eps;
distance_t radius_sqr = *radius_p * *radius_p;
distance_t dist_a = 0;
distance_t dist_b = 0;
distance_t dist_c = - radius_sqr;
distance_t mid;
distance_t discriminant;
distance_t sqrt_discr = 0.;
distance_t begin, end;
bool smallDistAtZero;
bool smallDistAtOne;
bool smallDistAtMid;
bool sqrt_discr_computed = false;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
coordinate_t end_minus_start_coordinate = line_end_p[coordinate] - line_start_p[coordinate];
dist_a += end_minus_start_coordinate * end_minus_start_coordinate;
dist_b += (line_start_p[coordinate] - circle_center_p[coordinate]) * end_minus_start_coordinate;
dist_c += powf(line_start_p[coordinate] - circle_center_p[coordinate], 2);
}
mid = - dist_b / dist_a;
discriminant = mid * mid - dist_c / dist_a;
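//These follow from intersecting p(t) = start + t*(end - start) with the ball
//||p(t) - center||^2 <= radius^2, i.e. dist_a*t^2 + 2*dist_b*t + dist_c <= 0;
//the feasible t-interval is centered at mid = -dist_b/dist_a with half-width sqrt(discriminant).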
distance_t circle_center_dist0_sqr = 0;
distance_t circle_center_dist1_sqr = 0;
distance_t circle_center_dist_mid_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist0 = line_start_p[coordinate] * 1.;
distance_t coordinate_for_dist1 = line_end_p[coordinate] * 1.;
distance_t coordinate_for_dist_mid = line_start_p[coordinate] * (1. - mid) + line_end_p[coordinate] * mid;
circle_center_dist0_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist0, 2);
circle_center_dist1_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist1, 2);
circle_center_dist_mid_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_mid, 2);
if(coordinate == *point_dimensions_p - 1){
smallDistAtZero = circle_center_dist0_sqr <= radius_sqr;
smallDistAtOne = circle_center_dist1_sqr <= radius_sqr;
smallDistAtMid = circle_center_dist_mid_sqr <= radius_sqr;
}
}
if(smallDistAtZero and smallDistAtOne){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 0;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 1;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = 0;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index)+1] = 1;
current_repetition++;
continue;
}
}
if(not smallDistAtMid and smallDistAtZero){
mid = 0.;
smallDistAtMid = true;
}else if(not smallDistAtMid and smallDistAtOne){
mid = 1.;
smallDistAtMid = true;
}
if(not smallDistAtMid){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 0.;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )+1] = 0.;
current_repetition++;
continue;
}
}
if(mid <= 0. and not smallDistAtZero){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 0.;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )+1] = 0.;
current_repetition++;
continue;
}
}
if(mid >= 1. and not smallDistAtOne){
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = 0.;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index)] = 1.;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index)+1] = 0.;
current_repetition++;
continue;
}
}
if(discriminant < 0.){
discriminant = 0.;
}
sqrt_discr = 0.;
if(smallDistAtZero){
begin = 0.;
}else{
sqrt_discr = (distance_t)sqrtf(discriminant);
sqrt_discr_computed = true;
const distance_t lambda1 = mid - sqrt_discr;
const distance_t outershift = lambda1 - save_eps_half;
distance_t innershift;
if(1. < mid){
if(lambda1 + save_eps_half< 1.){
innershift = lambda1 + save_eps_half;
}else{
innershift = 1.;
}
}else{
if(lambda1 + save_eps_half <mid){
innershift = lambda1 + save_eps_half;
}else{
innershift = mid;
}
}
bool small_dist_at_innershift;
bool small_dist_at_outershift;
distance_t circle_center_dist_innershift_sqr = 0;
distance_t circle_center_dist1_outershift_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_innershift = line_start_p[coordinate] * (1. - innershift) + line_end_p[coordinate] * innershift;
distance_t coordinate_for_dist1_outershift = line_start_p[coordinate] * (1. - outershift) + line_end_p[coordinate] * outershift;
circle_center_dist_innershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_innershift, 2);
circle_center_dist1_outershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist1_outershift, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_innershift = circle_center_dist_innershift_sqr <= radius_sqr;
small_dist_at_outershift = circle_center_dist1_outershift_sqr <= radius_sqr;
}
}
if(innershift >= outershift and small_dist_at_innershift and not small_dist_at_outershift){
begin = innershift;
}else{
distance_t left = 0., right;
if(mid < 1.){
right = mid;
}else{
right = 1;
}
distance_t m = 0.5 * (left + right);
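//Fallback bisection (sketch): when the closed-form root is numerically unreliable,
//binary-search [left, right] for the entry point of the free interval (the first parameter
//whose point lies within the radius), to a tolerance of save_eps.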
while(right - left > save_eps){
m = 0.5 * (left + right);
bool small_dist_at_m;
distance_t circle_center_dist_m_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_m = line_start_p[coordinate] * (1. - m) + line_end_p[coordinate] * m;
circle_center_dist_m_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_m, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_m = circle_center_dist_m_sqr <= radius_sqr;
}
}
if(small_dist_at_m){
right = m;
}else{
left = m;
}
}
begin = right;
}
}
if(smallDistAtOne){
end = 1.;
}else{
if(not sqrt_discr_computed){
sqrt_discr = sqrtf(discriminant);
}
const distance_t lambda2 = mid + sqrt_discr;
const distance_t outershift = lambda2 + save_eps_half;
distance_t innershift;
if(0 > mid){
if( 0 > lambda2 - save_eps_half){
innershift = 0;
}else{
innershift = lambda2 - save_eps_half;
}
}else{
if( mid > lambda2 - save_eps_half){
innershift = mid;
}else{
innershift = lambda2 - save_eps_half;
}
}
bool small_dist_at_innershift;
bool small_dist_at_outershift;
distance_t circle_center_dist_innershift_sqr = 0;
distance_t circle_center_dist1_outershift_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_innershift = line_start_p[coordinate] * (1. - innershift) + line_end_p[coordinate] * innershift;
distance_t coordinate_for_dist1_outershift = line_start_p[coordinate] * (1. - outershift) + line_end_p[coordinate] * outershift;
circle_center_dist_innershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_innershift, 2);
circle_center_dist1_outershift_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist1_outershift, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_innershift = circle_center_dist_innershift_sqr <= radius_sqr;
small_dist_at_outershift = circle_center_dist1_outershift_sqr <= radius_sqr;
}
}
if(innershift <= outershift and small_dist_at_innershift and not small_dist_at_outershift){
end = innershift;
}else{
distance_t left, right = 1.;
if(mid > 0.){
left = mid;
}else{
left = 0.;
}
distance_t m = 0.5 * (left + right);
while(right - left > save_eps){
m = 0.5 * (left + right);
bool small_dist_at_m;
distance_t circle_center_dist_m_sqr = 0;
for(curve_size_t coordinate = 0; coordinate < *point_dimensions_p; coordinate++){
distance_t coordinate_for_dist_m = line_start_p[coordinate] * (1. - m) + line_end_p[coordinate] * m;
circle_center_dist_m_sqr += powf(circle_center_p[coordinate] - coordinate_for_dist_m, 2);
if(coordinate == *point_dimensions_p - 1){
small_dist_at_m = circle_center_dist_m_sqr <= radius_sqr;
}
}
if(small_dist_at_m){
left = m;
}else{
right = m;
}
}
end = left;
}
}
if(current_repetition == 2){
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))] = begin;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index + (*curve1_size_p)*(*curve2_size_p))+1] = end;
return;
}else{
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )] = begin;
results_p[2*(curve2_index*(*curve1_size_p) + curve1_index )+1] = end;
}
current_repetition++;
}
}
cudaError_t Cuda_intersection::intersection_interval_call_gpu(
distance_t radius
){
cudaError_t cudaStatus_global = cudaSuccess;
for(int device_nbr = 0; device_nbr < number_devices; device_nbr++){
//Cuda launching utils
cudaError_t cudaStatus;
curve_size_t number_of_threads = curve1_size[device_nbr] * curve2_size[device_nbr];
curve_size_t number_of_blocks = 0;
cudaStatus = cudaSetDevice(device_nbr);
if(cudaStatus != cudaSuccess){
std::cerr << "CUDASetDevice failed! Do you have CUDA-capable GPU installed?" << std::endl;
}
cudaStatus = cudaMemcpy(dev_radius_p[device_nbr], &radius, sizeof(distance_t), cudaMemcpyHostToDevice );
if (cudaStatus != cudaSuccess ){
std::cerr << "CudaMemcpy radius to dev_radius_p failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
if((number_of_threads%BLOCKSIZE) == 0){
number_of_blocks = number_of_threads / BLOCKSIZE;
}else{
number_of_blocks = ((curve_size_t)(number_of_threads/ BLOCKSIZE)) + 1;
}
cuda_kernel1_calculate_intervals <<<number_of_blocks, BLOCKSIZE >>>( dev_results_p[device_nbr],
dev_points_curve1_p[device_nbr],
dev_points_curve2_p[device_nbr],
dev_curve1_size_p[device_nbr],
dev_curve2_size_p[device_nbr],
dev_point_dimensions_p[device_nbr],
dev_radius_p[device_nbr],
dev_is_last_kernel_p[device_nbr],
eps
);
}
for(int device_nbr = 0; device_nbr < number_devices; device_nbr++){
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(device_nbr);
if(cudaStatus != cudaSuccess){
std::cerr << "CUDASetDevice failed! Do you have CUDA-capable GPU installed?" << std::endl;
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess){
std::cerr << "CudaGetLastError returned error code: " << cudaStatus << " after launching kernel!" << std::endl;
std::cerr << cudaGetErrorString(cudaStatus) << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
// Waits until all threads are done with their job.
cudaStatus = cudaDeviceSynchronize();
if( cudaStatus != cudaSuccess){
std::cerr << "CudaDeviceSynchronize() returned error code: " << cudaStatus << " after launching kernel!" << std::endl;
std::cerr << cudaGetErrorString(cudaStatus) << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
if(device_nbr == number_devices - 1){
cudaStatus = cudaMemcpy(
host_results_p + 2* (curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr], sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
cudaMemcpyDeviceToHost
);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
cudaStatus = cudaMemcpy(
host_results_p +2* (curve1_end_index[number_devices-1]+1)*(curve2_end_index[number_devices-1]+1) +2*(curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr] + 2*(curve1_size[device_nbr])*(curve2_size[device_nbr]),
sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
cudaMemcpyDeviceToHost
);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
}else{
cudaStatus = cudaMemcpy(
host_results_p + 2*(curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr],
sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
cudaMemcpyDeviceToHost
);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
cudaStatus = cudaMemcpy(
host_results_p + 2*(curve1_end_index[number_devices-1]+1)*(curve2_end_index[number_devices-1]+1) +2*(curve1_end_index[number_devices-1]+1)*(curve2_start_index[device_nbr]),
dev_results_p[device_nbr] + 2*(curve1_size[device_nbr])*(curve2_size[device_nbr]),
sizeof(distance_t) * (2 * curve1_size[device_nbr] * curve2_size[device_nbr]),
cudaMemcpyDeviceToHost
);
if(cudaStatus != cudaSuccess){
std::cerr << "CudaMemcpy dev_results into results failed!" << std::endl;
cudaStatus_global = cudaStatus;
goto Error;
}
}
}
goto Regular_finish;
//Free the memory on Error
Error:
free_memory();
return cudaStatus_global;
Regular_finish:
return cudaStatus_global;
}
|
24bbc12cbe5b95f80d14dd24cf6591d4c1fb5a55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <vector>
//
#define CUDA_ERROR_CHECK // enable error check in cuda
#include "/home/wxie/AI/Spike/July_2019/Spike_July_2019/Spike/Backend/CUDA/Helpers/ErrorCheck.hpp"
using namespace std;
// Kernel function to do nested loops
__global__
void add(int max_x, int max_y, float *tot, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < max_x && j<max_y) {
atomicAdd(tot, x[i] + y[j]);
}
}
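// Note: the kernel accumulates sum_{i,j}(x[i] + y[j]) via atomicAdd, which mathematically
// equals Ny*sum(x) + Nx*sum(y); with ~2^30 single-precision additions both the CPU and GPU
// totals below will carry noticeable rounding error.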
int main(void)
{
int Nx = 1<<15;
int Ny = 1<<15;
float *d_x = NULL, *d_y = NULL;
float *d_tot = NULL;
CudaSafeCall(hipMalloc((void **)&d_x, sizeof(float)*Nx));
CudaSafeCall(hipMalloc((void **)&d_y, sizeof(float)*Ny));
CudaSafeCall(hipMalloc((void **)&d_tot, sizeof(float)));
// Allocate host-side input vectors (the data is copied to the device explicitly below)
vector<float> vx;
vector<float> vy;
// initialize x and y arrays on the host
for (int i = 0; i < Nx; i++)
vx.push_back(i);
for (int i = 0; i < Ny; i++)
vy.push_back(i*10);
//
float tot = 0;
for(int i = 0; i<vx.size(); i++)
for(int j = 0; j<vy.size(); j++)
tot += vx[i] + vy[j];
cout<<"CPU: tot: "<<tot<<endl;
//
CudaSafeCall(hipMemcpy(d_x, vx.data(), vx.size()*sizeof(float), hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_y, vy.data(), vy.size()*sizeof(float), hipMemcpyHostToDevice));
//
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, add, 0, Nx+Ny);
//.. bx*by can not go beyond the blockSize, or hardware limit, which is 1024;
//.. bx*by = blockSize && bx/by=Nx/Ny, solve the equations
int bx = sqrt(blockSize*Nx/(float)Ny);
int by = bx*Ny/(float)Nx;
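//Worked example (assuming the occupancy API returns blockSize == 1024): with Nx == Ny this
//gives bx = sqrt(1024) = 32 and by = 32*Ny/Nx = 32, so bx*by == blockSize exactly.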
dim3 blockSize_3D(bx, by);
dim3 gridSize_3D((Nx+bx-1)/bx, (Ny+by-1)/by);
cout<<"blockSize: "<<blockSize<<endl;
cout<<"bx: "<<bx<<" by: "<<by<<" gx: "<<gridSize_3D.x<<" gy: "<<gridSize_3D.y<<endl;
// calculate theoretical occupancy
int maxActiveBlocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, add, blockSize, 0);
int device;
hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
blockSize, occupancy);
// Run the kernel over all Nx*Ny index pairs on the GPU
tot = 0;
hipLaunchKernelGGL(( add), dim3(gridSize_3D), dim3(blockSize_3D), 0, 0, Nx, Ny, d_tot, d_x, d_y);
// Wait for GPU to finish before accessing on host
CudaCheckError(); //.. defined in SPIKE include hipDeviceSynchronize()
//
CudaSafeCall(hipMemcpy(&tot, d_tot, sizeof(float), hipMemcpyDeviceToHost));
//
cout<<" GPU: tot: "<<tot<<endl;
// Free memory
CudaSafeCall(hipFree(d_x));
CudaSafeCall(hipFree(d_y));
CudaSafeCall(hipFree(d_tot));
return 0;
}
| 24bbc12cbe5b95f80d14dd24cf6591d4c1fb5a55.cu | #include <iostream>
#include <math.h>
#include <vector>
//
#define CUDA_ERROR_CHECK // enable error check in cuda
#include "/home/wxie/AI/Spike/July_2019/Spike_July_2019/Spike/Backend/CUDA/Helpers/ErrorCheck.hpp"
using namespace std;
// Kernel function to do nested loops
__global__
void add(int max_x, int max_y, float *tot, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < max_x && j<max_y) {
atomicAdd(tot, x[i] + y[j]);
}
}
int main(void)
{
int Nx = 1<<15;
int Ny = 1<<15;
float *d_x = NULL, *d_y = NULL;
float *d_tot = NULL;
CudaSafeCall(cudaMalloc((void **)&d_x, sizeof(float)*Nx));
CudaSafeCall(cudaMalloc((void **)&d_y, sizeof(float)*Ny));
CudaSafeCall(cudaMalloc((void **)&d_tot, sizeof(float)));
// Allocate host-side input vectors (the data is copied to the device explicitly below)
vector<float> vx;
vector<float> vy;
// initialize x and y arrays on the host
for (int i = 0; i < Nx; i++)
vx.push_back(i);
for (int i = 0; i < Ny; i++)
vy.push_back(i*10);
//
float tot = 0;
for(int i = 0; i<vx.size(); i++)
for(int j = 0; j<vy.size(); j++)
tot += vx[i] + vy[j];
cout<<"CPU: tot: "<<tot<<endl;
//
CudaSafeCall(cudaMemcpy(d_x, vx.data(), vx.size()*sizeof(float), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_y, vy.data(), vy.size()*sizeof(float), cudaMemcpyHostToDevice));
//
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, add, 0, Nx+Ny);
//.. bx*by can not go beyond the blockSize, or hardware limit, which is 1024;
//.. bx*by = blockSize && bx/by=Nx/Ny, solve the equations
int bx = sqrt(blockSize*Nx/(float)Ny);
int by = bx*Ny/(float)Nx;
dim3 blockSize_3D(bx, by);
dim3 gridSize_3D((Nx+bx-1)/bx, (Ny+by-1)/by);
cout<<"blockSize: "<<blockSize<<endl;
cout<<"bx: "<<bx<<" by: "<<by<<" gx: "<<gridSize_3D.x<<" gy: "<<gridSize_3D.y<<endl;
// calculate theoretical occupancy
int maxActiveBlocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, add, blockSize, 0);
int device;
cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
(float)(props.maxThreadsPerMultiProcessor /
props.warpSize);
printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
blockSize, occupancy);
// Run the kernel over all Nx*Ny index pairs on the GPU
tot = 0;
add<<<gridSize_3D, blockSize_3D>>>(Nx, Ny, d_tot, d_x, d_y);
// Wait for GPU to finish before accessing on host
CudaCheckError(); //.. defined in SPIKE include cudaDeviceSynchronize()
//
CudaSafeCall(cudaMemcpy(&tot, d_tot, sizeof(float), cudaMemcpyDeviceToHost));
//
cout<<" GPU: tot: "<<tot<<endl;
// Free memory
CudaSafeCall(cudaFree(d_x));
CudaSafeCall(cudaFree(d_y));
CudaSafeCall(cudaFree(d_tot));
return 0;
}
|
1ac30b0019f6e003c1086238f8588fc15611631d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define CREATE_RAND_ARR(arr, size, min, max) \
do { \
time_t t; \
srand((unsigned)time(&t)); \
for (int i = 0; i < size; i++) \
arr[i] = rand() % max + min; \
} while (0) \
#define PRINT_ARR(arr, size) \
do { \
for (int i = 0; i < size; i++) \
printf("%u, ", arr[i]); \
printf("\n"); \
} while (0) \
__global__ void ballot_kernel(int* before, int* after, const int size)
{
int threadsmask = 0xffffffff << threadIdx.x;
if (threadIdx.x < size) {
int e = threadIdx.x & 1;
// before[threadIdx.x] = e;
int ones = __ballot_sync(0xffffffff, e);
after[threadIdx.x] = ones;
before[threadIdx.x] = __popc(ones & threadsmask);
}
}
int main()
{
const int size = 8;
int* d_after;
int* d_before;
hipMalloc((void**)&d_after, size * sizeof(int));
hipMalloc((void**)&d_before, size * sizeof(int));
hipLaunchKernelGGL(( ballot_kernel) , dim3(1), dim3(32) , 0, 0, d_before, d_after, size);
int* h_before = (int*)malloc(size * sizeof(int));
int* h_after = (int*)malloc(size * sizeof(int));
hipMemcpy(h_before, d_before, size * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_after, d_after, size * sizeof(int), hipMemcpyDeviceToHost);
printf("BEFORE:\n");
PRINT_ARR(h_before, size);
printf("AFTER:\n");
PRINT_ARR(h_after, size);
hipFree(d_after);
hipFree(d_before);
free(h_before);
free(h_after);
return 0;
} | 1ac30b0019f6e003c1086238f8588fc15611631d.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#define CREATE_RAND_ARR(arr, size, min, max) \
do { \
time_t t; \
srand((unsigned)time(&t)); \
for (int i = 0; i < size; i++) \
arr[i] = rand() % max + min; \
} while (0) \
#define PRINT_ARR(arr, size) \
do { \
for (int i = 0; i < size; i++) \
printf("%u, ", arr[i]); \
printf("\n"); \
} while (0) \
__global__ void ballot_kernel(int* before, int* after, const int size)
{
int threadsmask = 0xffffffff << threadIdx.x;
if (threadIdx.x < size) {
int e = threadIdx.x & 1;
// before[threadIdx.x] = e;
int ones = __ballot_sync(0xffffffff, e);
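// Note: the full 0xffffffff mask assumes every lane of the warp reaches this __ballot_sync;
// when size < warpSize the lanes that skipped the branch are still named in the mask, so a
// more conservative variant would pass __activemask() instead of the hard-coded mask.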
after[threadIdx.x] = ones;
before[threadIdx.x] = __popc(ones & threadsmask);
}
}
int main()
{
const int size = 8;
int* d_after;
int* d_before;
cudaMalloc((void**)&d_after, size * sizeof(int));
cudaMalloc((void**)&d_before, size * sizeof(int));
ballot_kernel <<< 1, 32 >>> (d_before, d_after, size);
int* h_before = (int*)malloc(size * sizeof(int));
int* h_after = (int*)malloc(size * sizeof(int));
cudaMemcpy(h_before, d_before, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_after, d_after, size * sizeof(int), cudaMemcpyDeviceToHost);
printf("BEFORE:\n");
PRINT_ARR(h_before, size);
printf("AFTER:\n");
PRINT_ARR(h_after, size);
cudaFree(d_after);
cudaFree(d_before);
free(h_before);
free(h_after);
return 0;
} |
10a54bac8582fafc2af040f81f4ac2f534600d5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__
void calc_meanshift2(float* y_new, float* y_old, float* meanshift)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
float tempY_new = y_new[i];
float tempY_old = y_old[i];
meanshift[i] = (tempY_new-tempY_old)*(tempY_new-tempY_old);
}
__device__
float kernel_fun(float x, float sigma2)
{
if( x > sigma2)
return 0;
else
return exp(-x/2/sigma2);
}
__global__
void calc_Kernel_Matrix(int N, int D, float *x, float *y, float *K, int sigma2)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
// It is also true that ( k == (N*i + j) )
// Calc Dist...
float dist = 0;
for(int d=0; d<D; d++)
dist+= (y[i*D+d] - x[j*D+d])*(y[i*D+d] - x[j*D+d]);
K[i*N+j] = kernel_fun(dist, sigma2);
}
__global__
void kernel_sum_div(int D, float* y_new, float* K_sum)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
for(int d=0; d<D; d++)
y_new[i*D+d] = y_new[i*D+d]/K_sum[i];
}
__global__ void kernel_Dvec_mult(int N, int D, float* K, float* x, float* Kx, int d)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
Kx[i*N+j] = K[i*N+j]*x[j*D+d];
}
__global__ void copy_to_y(int D, float* d_y_new, float* kernelXsum, int d)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
d_y_new[i*D+d] = kernelXsum[i];
}
__global__ void calc_reduce_meanshift(int N, float* y_new, float* y_old, float* reducted_vec)
{
extern __shared__ float reduction_cache[] ;
//thread ID on each row of blocks
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cache_i = threadIdx.x;
float temp=0;
float tempY_new; // These are useful to ensure that only one global-memory access happens for each vector
float tempY_old;
while (tid < N)
{
tempY_new = y_new[tid];
tempY_old= y_old[tid];
temp += (tempY_new-tempY_old)*(tempY_new-tempY_old);
tid += blockDim.x * gridDim.x;
}
reduction_cache[cache_i] = temp;
__syncthreads();
// Begin the reduction per shared-memory-block
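/* Tree-reduction sketch: with blockDim.x == 8 the stride i takes the values 4, 2, 1 and
   thread 0 ends up holding the sum of all 8 cache entries after log2(blockDim.x) passes.
   This assumes blockDim.x is a power of two, which the (not shown) launcher must guarantee. */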
for(int i=blockDim.x/2; i>0; i>>=1)
{
if(cache_i < i)
reduction_cache[cache_i] += reduction_cache[cache_i+i];
__syncthreads();
}
// Final Sum is stored in global array.
if(cache_i==0)
reducted_vec[blockIdx.x] = reduction_cache[0];
}
// __global__
// void kernelX_dot_product(int N, int D, int d, float* K, float* x, float* reducted_vec)
// {
// extern __shared__ float reduction_cache[] ;
// //thread ID on each row of blocks
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
// int cache_i = threadIdx.x;
// /* This UNROLLS the elements of x, "outside" the grid's index range.
// In the case of N=600, threadsPerBlock=256 and 2 blocks in total,
// we have 600-256*2=88 additions done in parallel, before the reduction of the 512 threads.
// incase the index-range > N, the reduction scheme will simply add some zeros to the vector.
// This allows as to oversubscribe in terms of threads and blocks.
// */
// int offset = N*blockIdx.y;
// float temp=0;
// while (tid < N)
// {
// temp += K[tid+offset]*x[tid*D+d];
// tid += blockDim.x * gridDim.x;
// }
// /* Load x-data into local shared memory.
// As mentioned before, some entries are small sums of
// x's outside the grid's range */
// reduction_cache[cache_i] = temp;
// __syncthreads();
// // Begin the reduction per shared-memory-block
// for(int i=blockDim.x/2; i>0; i>>=1)
// {
// if(cache_i < i)
// reduction_cache[cache_i] += reduction_cache[cache_i+i];
// __syncthreads();
// }
// // Final Sum is stored in global array, with stride d, to match the NxD dimensionality of the input dataset.
// if(cache_i==0)
// reducted_vec[blockIdx.y*gridDim.x + blockIdx.x + d] = reduction_cache[cache_i];
// }
// void WR_kernelX_dow_product(int N, float* d_K, float* d_x, /*out*/ ReductionCache* rc )
// {
// dim3 blockDim2(4, 1, 1);
// dim3 gridDim2(N/4,N,1);
// size_t cache_size = 4*N*sizeof(float);
// kernelX_dot_product<<<gridDim2, blockDim2, cache_size>>>(N,D,0, d_KernelMatrix, d_x, d_y_new);
// kernelX_dot_product<<<gridDim2, blockDim2, cache_size>>>(N,D,1, d_KernelMatrix, d_x, d_y_new);
// //reduction_sum<<<L/256, 256, 256*sizeof(float) >>>(N/4, d_y_new, d_y_new);
// if(rc->blocksNum == 1)
// {
// kernelX_dot_product<<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N,D,0, d_K,d_x, rc->d_sum);
// kernelX_dot_product<<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N,D,1, d_K,d_x, rc->d_sum);
// }
// else
// {
// // We need multiple reduction calls!
// reduction_sum <<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N, d_A, rc->d_reduced_vec);
// /* Reduct the final reduction vector! */
// // Ideally we would like threads_num==length(reduced_vec)/numRow.
// However threads_num2 must be a power of 2. Thus:
// int threads_num2 = exp2f(floor(log2f(rc->reduced_vec_length/rc->rowNum)));
// if(threads_num2>512)
// threads_num2=512;
// //printf("THREADS: %d RED_VEC %d\n", threads_num2, rc->reduced_vec_length/rc->rowNum );
// dim3 gridDim2(1,rc->rowNum,1);
// dim3 blockDim2(threads_num2,1,1);
// reduction_sum<<<gridDim2, blockDim2, threads_num2*sizeof(float)>>>\
// (rc->gridDim.x, rc->d_reduced_vec, rc->d_sum); //
// // WARNING: launching with original thread_num might be too much.
// // SOLUTION: Find power-of-2 nearest to block_num
// }
// } | 10a54bac8582fafc2af040f81f4ac2f534600d5f.cu | #include <stdio.h>
#include <stdlib.h>
__global__
void calc_meanshift2(float* y_new, float* y_old, float* meanshift)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
float tempY_new = y_new[i];
float tempY_old = y_old[i];
meanshift[i] = (tempY_new-tempY_old)*(tempY_new-tempY_old);
}
__device__
float kernel_fun(float x, float sigma2)
{
if( x > sigma2)
return 0;
else
return exp(-x/2/sigma2);
}
__global__
void calc_Kernel_Matrix(int N, int D, float *x, float *y, float *K, int sigma2)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
// It is also true that ( k == (N*i + j) )
// Calc Dist...
float dist = 0;
for(int d=0; d<D; d++)
dist+= (y[i*D+d] - x[j*D+d])*(y[i*D+d] - x[j*D+d]);
K[i*N+j] = kernel_fun(dist, sigma2);
}
__global__
void kernel_sum_div(int D, float* y_new, float* K_sum)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
for(int d=0; d<D; d++)
y_new[i*D+d] = y_new[i*D+d]/K_sum[i];
}
__global__ void kernel_Dvec_mult(int N, int D, float* K, float* x, float* Kx, int d)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
Kx[i*N+j] = K[i*N+j]*x[j*D+d];
}
__global__ void copy_to_y(int D, float* d_y_new, float* kernelXsum, int d)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
d_y_new[i*D+d] = kernelXsum[i];
}
__global__ void calc_reduce_meanshift(int N, float* y_new, float* y_old, float* reducted_vec)
{
extern __shared__ float reduction_cache[] ;
//thread ID on each row of blocks
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int cache_i = threadIdx.x;
float temp=0;
float tempY_new; // These are useful to ensure that only one global-memory access happens for each vector
float tempY_old;
while (tid < N)
{
tempY_new = y_new[tid];
tempY_old= y_old[tid];
temp += (tempY_new-tempY_old)*(tempY_new-tempY_old);
tid += blockDim.x * gridDim.x;
}
reduction_cache[cache_i] = temp;
__syncthreads();
// Begin the reduction per shared-memory-block
for(int i=blockDim.x/2; i>0; i>>=1)
{
if(cache_i < i)
reduction_cache[cache_i] += reduction_cache[cache_i+i];
__syncthreads();
}
// Final Sum is stored in global array.
if(cache_i==0)
reducted_vec[blockIdx.x] = reduction_cache[0];
}
// __global__
// void kernelX_dot_product(int N, int D, int d, float* K, float* x, float* reducted_vec)
// {
// extern __shared__ float reduction_cache[] ;
// //thread ID on each row of blocks
// int tid = blockDim.x * blockIdx.x + threadIdx.x;
// int cache_i = threadIdx.x;
// /* This UNROLLS the elements of x, "outside" the grid's index range.
// In the case of N=600, threadsPerBlock=256 and 2 blocks in total,
// we have 600-256*2=88 additions done in parallel, before the reduction of the 512 threads.
// incase the index-range > N, the reduction scheme will simply add some zeros to the vector.
// This allows as to oversubscribe in terms of threads and blocks.
// */
// int offset = N*blockIdx.y;
// float temp=0;
// while (tid < N)
// {
// temp += K[tid+offset]*x[tid*D+d];
// tid += blockDim.x * gridDim.x;
// }
// /* Load x-data into local shared memory.
// As mentioned before, some entries are small sums of
// x's outside the grid's range */
// reduction_cache[cache_i] = temp;
// __syncthreads();
// // Begin the reduction per shared-memory-block
// for(int i=blockDim.x/2; i>0; i>>=1)
// {
// if(cache_i < i)
// reduction_cache[cache_i] += reduction_cache[cache_i+i];
// __syncthreads();
// }
// // Final Sum is stored in global array, with stride d, to match the NxD dimensionality of the input dataset.
// if(cache_i==0)
// reducted_vec[blockIdx.y*gridDim.x + blockIdx.x + d] = reduction_cache[cache_i];
// }
// void WR_kernelX_dow_product(int N, float* d_K, float* d_x, /*out*/ ReductionCache* rc )
// {
// dim3 blockDim2(4, 1, 1);
// dim3 gridDim2(N/4,N,1);
// size_t cache_size = 4*N*sizeof(float);
// kernelX_dot_product<<<gridDim2, blockDim2, cache_size>>>(N,D,0, d_KernelMatrix, d_x, d_y_new);
// kernelX_dot_product<<<gridDim2, blockDim2, cache_size>>>(N,D,1, d_KernelMatrix, d_x, d_y_new);
// //reduction_sum<<<L/256, 256, 256*sizeof(float) >>>(N/4, d_y_new, d_y_new);
// if(rc->blocksNum == 1)
// {
// kernelX_dot_product<<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N,D,0, d_K,d_x, rc->d_sum);
// kernelX_dot_product<<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N,D,1, d_K,d_x, rc->d_sum);
// }
// else
// {
// // We need multiple reduction calls!
// reduction_sum <<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N, d_A, rc->d_reduced_vec);
// /* Reduct the final reduction vector! */
// // Ideally we would like threads_num==length(reduced_vec)/numRow.
// However threads_num2 must be a power of 2. Thus:
// int threads_num2 = exp2f(floor(log2f(rc->reduced_vec_length/rc->rowNum)));
// if(threads_num2>512)
// threads_num2=512;
// //printf("THREADS: %d RED_VEC %d\n", threads_num2, rc->reduced_vec_length/rc->rowNum );
// dim3 gridDim2(1,rc->rowNum,1);
// dim3 blockDim2(threads_num2,1,1);
// reduction_sum<<<gridDim2, blockDim2, threads_num2*sizeof(float)>>>\
// (rc->gridDim.x, rc->d_reduced_vec, rc->d_sum); //
// // WARNING: launching with original thread_num might be too much.
// // SOLUTION: Find power-of-2 nearest to block_num
// }
// } |
97ed85c4cf88d95b2012870db5338b8b6657a0a7.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform X-ray parallel projection
 * operation given a geometry, angles and image. It uses the 3D texture
* memory linear interpolation to uniformily sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "ray_interpolated_projection_parallel.hpp"
#include <stdio.h>
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Ax_parallel_beam_interpolated",hipGetErrorString(__err));\
hipDeviceReset();\
exit(__err);\
} \
} while (0)
// Declare the texture reference.
texture<float, hipTextureType3D , hipReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector_parallel( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin,
float DSO,
float maxdist){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
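    //Parallel-beam trick: the per-pixel source S gets the same u/v offsets as P, so the
    //direction P-S is identical for every pixel and all rays are parallel rather than
    //diverging from a single focal spot.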
// Length is the ray length in normalized space
double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
//now length is the integer number of samples required on this line
length=ceil(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceil((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel")
for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time.
}
float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
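    //Each step advances by roughly geo.accuracy voxels, so deltalength is the physical length
    //of one sampling step and sum*deltalength below approximates the line integral along the ray.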
detector[idx]=sum*deltalength;
}
int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const angles,int nangles){
// copy data to CUDA memory
hipArray *d_imagedata = 0;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
hipMalloc((void**)&dProjection, num_bytes);
cudaCheckErrors("hipMalloc fail");
// If we are going to time
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
    // 16x16 blocks originally gave the best performance empirically and were compatible
    // with most GPUs; the launch below now uses 32x32 blocks (1024 threads).
dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1);
dim3 block(32,32,1);
Point3D source, deltaU, deltaV, uvOrigin;
float maxdist;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=angles[i*3];
geo.theta=angles[i*3+1];
geo.psi =angles[i*3+2];
//precomute distances for faster execution
maxdist=maxdistanceCuboid(geo,i);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Interpolation!!
hipLaunchKernelGGL(( kernelPixelDetector_parallel), dim3(grid),dim3(block), 0, 0, geo,dProjection, source, deltaU, deltaV, uvOrigin,geo.DSO[i],floor(maxdist));
cudaCheckErrors("Kernel fail");
// copy result to host
hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("%f\n" ,elapsedTime);
}
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dProjection);
hipFreeArray(d_imagedata);
cudaCheckErrors("hipFree d_imagedata fail");
hipDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it does save about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it is offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
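    //The centre-of-rotation correction shifts the detector points and the source in-plane
    //along (-sin(alpha), cos(alpha)), i.e. perpendicular to the rotated x-axis, converted
    //into voxel units.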
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
| 97ed85c4cf88d95b2012870db5338b8b6657a0a7.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform X-ray parallel projection
 * operation given a geometry, angles and image. It uses the 3D texture
* memory linear interpolation to uniformily sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "ray_interpolated_projection_parallel.hpp"
#include <stdio.h>
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("%s \n",msg);\
printf("CBCT:CUDA:Ax_parallel_beam_interpolated",cudaGetErrorString(__err));\
cudaDeviceReset();\
exit(__err);\
} \
} while (0)
// Declare the texture reference.
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector_parallel( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin,
float DSO,
float maxdist){
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
double length=sqrt((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
//now length is the integer number of samples required on this line
length=ceil(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceil((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the number of memory reads at the given accuracy ("samples per voxel")
for (i=floor(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D(tex, tx+0.5, ty+0.5, tz+0.5); // this line is 94% of time.
}
float deltalength=sqrt((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection_parallel(float const * const img, Geometry geo, float** result,float const * const angles,int nangles){
// copy data to CUDA memory
cudaArray *d_imagedata = 0;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
cudaMalloc((void**)&dProjection, num_bytes);
cudaCheckErrors("cudaMalloc fail");
// If we are going to time
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
// 32x32 thread blocks gave the best performance empirically
// Conveniently, that block size is also compatible with most GPUs.....
dim3 grid(ceil((float)geo.nDetecU/32),ceil((float)geo.nDetecV/32),1);
dim3 block(32,32,1);
Point3D source, deltaU, deltaV, uvOrigin;
float maxdist;
for (unsigned int i=0;i<nangles;i++){
geo.alpha=angles[i*3];
geo.theta=angles[i*3+1];
geo.psi =angles[i*3+2];
//precompute distances for faster execution
maxdist=maxdistanceCuboid(geo,i);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Interpolation!!
kernelPixelDetector_parallel<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin,geo.DSO[i],floor(maxdist));
cudaCheckErrors("Kernel fail");
// copy result to host
cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("%f\n" ,elapsedTime);
}
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dProjection);
cudaFreeArray(d_imagedata);
cudaCheckErrors("cudaFree d_imagedata fail");
cudaDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
* needed to compute the locations of the x-rays. While it seems verbose and overly-optimized,
* it does save about 30% of each kernel call. That's something!
**/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it's offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
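/* Minimal illustrative sketch (added note, not part of the original TIGRE code; the
 * helper name is made up and nothing above calls it): how the precomputed
 * uvOrigin/deltaU/deltaV are consumed. Instead of redoing the offset/rotation chain
 * above for every detector pixel, the kernel only needs one multiply-add per axis,
 * which is where the ~30% saving mentioned above comes from;
 * kernelPixelDetector_parallel does exactly this inline. */
__host__ __device__ static inline Point3D detectorPixelExample(Point3D uvOrigin, Point3D deltaU, Point3D deltaV, int pixelU, int pixelV){
    Point3D P;
    P.x = uvOrigin.x + pixelU*deltaU.x + pixelV*deltaV.x;
    P.y = uvOrigin.y + pixelU*deltaU.y + pixelV*deltaV.y;
    P.z = uvOrigin.z + pixelU*deltaU.z + pixelV*deltaV.z;
    return P;
}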
|
c2f5e158028ecdcaa1539d5b2a038e284b646df2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Equihash solver created by djeZo ([email protected]) for NiceHash
* Adapted to be more compatible with older C++ compilers
*
* cuda_djezo solver was released by NiceHash (www.nicehash.com) under
* GPL 3.0 license. If you don't have a copy, you can obtain one from
* https://www.gnu.org/licenses/gpl-3.0.txt
*
* Based on CUDA solver by John Tromp released under MIT license.
* Some helper functions taken out of OpenCL solver by Marc Bevand
* released under MIT license.
*
* Copyright (c) 2016 John Tromp, Marc Bevand
* Copyright (c) 2017 djeZo, Tanguy Pruvot (GPL v3)
*/
#ifdef WIN32
#include <Windows.h>
#endif
#include <stdio.h>
#include <vector>
//#include <mutex>
#include "equihash.h"
#include "eqcuda.hpp" // eq_cuda_context
#include "blake2/blake2.h"
//#define WN 200
//#define WK 9
#ifndef MAX_GPUS
#define MAX_GPUS 16
#endif
#define NDIGITS (WK+1)
#define DIGITBITS (WN/(NDIGITS))
#define PROOFSIZE (1<<WK)
#define BASE (1<<DIGITBITS)
#define NHASHES (2*BASE)
#define HASHESPERBLAKE (512/WN)
#define HASHOUT (HASHESPERBLAKE*WN/8)
#define NBLOCKS ((NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE)
#define BUCKBITS (DIGITBITS - RB)
#define NBUCKETS (1 << BUCKBITS)
#define BUCKMASK (NBUCKETS - 1)
#define SLOTBITS (RB + 2)
#define SLOTRANGE (1 << SLOTBITS)
#define NSLOTS SM
#define SLOTMASK (SLOTRANGE - 1)
#define NRESTS (1 << RB)
#define RESTMASK (NRESTS - 1)
#define CANTORBITS (2 * SLOTBITS - 2)
#define CANTORMASK ((1 << CANTORBITS) - 1)
#define CANTORMAXSQRT (2 * NSLOTS)
#define RB8_NSLOTS 640
#define RB8_NSLOTS_LD 624
#define FD_THREADS 128
#ifdef __INTELLISENSE__
// reduce vstudio editor warnings
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#define __launch_bounds__(max_tpb, min_blocks)
#define __CUDA_ARCH__ 520
uint32_t __byte_perm(uint32_t x, uint32_t y, uint32_t z);
uint32_t __shfl2(uint32_t x, uint32_t y);
uint32_t __shfl_sync(uint32_t mask, uint32_t x, uint32_t y);
uint32_t atomicExch(uint32_t *x, uint32_t y);
uint32_t atomicAdd(uint32_t *x, uint32_t y);
void __syncthreads(void);
void __threadfence(void);
void __threadfence_block(void);
uint32_t __ldg(const uint32_t* address);
uint64_t __ldg(const uint64_t* address);
uint4 __ldca(const uint4 *ptr);
u32 __ldca(const u32 *ptr);
u32 umin(const u32, const u32);
u32 umax(const u32, const u32);
#endif
#define OPT_SYNC_ALL
#if TORCH_HIP_VERSION >= 9000 && __CUDA_ARCH__ >= 300
#define __shfl2(var, srcLane) __shfl_sync(0xFFFFFFFFu, var, srcLane)
#undef __any
#define __any(p) __any_sync(0xFFFFFFFFu, p)
#else
#define __shfl2 __shfl
#endif
typedef u32 proof[PROOFSIZE];
struct __align__(32) slot {
u32 hash[8];
};
struct __align__(16) slotsmall {
u32 hash[4];
};
struct __align__(8) slottiny {
u32 hash[2];
};
template <u32 RB, u32 SM>
struct equi
{
slot round0trees[4096][RB8_NSLOTS];
slot trees[1][NBUCKETS][NSLOTS];
struct {
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round2trees[NBUCKETS];
struct {
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round3trees[NBUCKETS];
slotsmall treessmall[4][NBUCKETS][NSLOTS];
slottiny treestiny[1][4096][RB8_NSLOTS_LD];
u32 round4bidandsids[NBUCKETS][NSLOTS];
union {
u64 blake_h[8];
u32 blake_h32[16];
};
struct {
u32 nslots8[4096];
u32 nslots0[4096];
u32 nslots[9][NBUCKETS];
scontainerreal srealcont;
} edata;
};
// todo: use cuda_helper.h and/or cuda_vector.h
__device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b)
{
return make_uint2(a.x ^ b.x, a.y ^ b.y);
}
__device__ __forceinline__ uint4 operator^ (uint4 a, uint4 b)
{
return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
// for ROR 63 (or ROL 1); this func only support (32 <= offset < 64)
__device__ __forceinline__ uint2 ROR2(const uint2 a, const int offset)
{
uint2 result;
#if __CUDA_ARCH__ > 300
{
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
#else
result.y = ((a.x >> (offset - 32)) | (a.y << (64 - offset)));
result.x = ((a.y >> (offset - 32)) | (a.x << (64 - offset)));
#endif
return result;
}
__device__ __forceinline__ uint2 SWAPUINT2(uint2 value)
{
return make_uint2(value.y, value.x);
}
__device__ __forceinline__ uint2 ROR24(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x2107);
result.y = __byte_perm(a.y, a.x, 0x6543);
return result;
}
__device__ __forceinline__ uint2 ROR16(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x1076);
result.y = __byte_perm(a.y, a.x, 0x5432);
return result;
}
__device__ __forceinline__ void G2(u64 & a, u64 & b, u64 & c, u64 & d, u64 x, u64 y)
{
a = a + b + x;
((uint2*)&d)[0] = SWAPUINT2(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR24(((uint2*)&b)[0] ^ ((uint2*)&c)[0]);
a = a + b + y;
((uint2*)&d)[0] = ROR16(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR2(((uint2*)&b)[0] ^ ((uint2*)&c)[0], 63U);
}
// untested..
struct packer_default
{
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (((bucketid << SLOTBITS) | s0) << SLOTBITS) | s1;
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
// BUCKMASK-ed to prevent illegal memory accesses in case of memory errors
return (bid >> (2 * SLOTBITS)) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return bid & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> SLOTBITS) & SLOTMASK;
}
};
struct packer_cantor
{
__device__ __forceinline__ static u32 cantor(const u32 s0, const u32 s1)
{
u32 a = umax(s0, s1);
u32 b = umin(s0, s1);
return a * (a + 1) / 2 + b;
}
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (bucketid << CANTORBITS) | cantor(s0, s1);
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> CANTORBITS) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return ((bid & CANTORMASK) - cantor(0, s1)) & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
u32 k, q, sqr = 8 * (bid & CANTORMASK) + 1;
// this k=sqrt(sqr) computing loop averages 3.4 iterations out of maximum 9
for (k = CANTORMAXSQRT; (q = sqr / k) < k; k = (k + q) / 2);
return ((k - 1) / 2) & SLOTMASK;
}
};
__device__ __constant__ const u64 blake_iv[] = {
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
#if CUDART_VERSION < 8000 || !defined(__ldca)
#define __ldca(ptr) *(ptr)
#endif
template <u32 RB, u32 SM, typename PACKER>
__global__ void digit_first(equi<RB, SM>* eq, u32 nonce)
{
const u32 block = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ u64 hash_h[8];
u32* hash_h32 = (u32*)hash_h;
if (threadIdx.x < 16)
hash_h32[threadIdx.x] = __ldca(&eq->blake_h32[threadIdx.x]);
__syncthreads();
u64 m = (u64)block << 32 | (u64)nonce;
union
{
u64 v[16];
u32 v32[32];
uint4 v128[8];
};
v[0] = hash_h[0];
v[1] = hash_h[1];
v[2] = hash_h[2];
v[3] = hash_h[3];
v[4] = hash_h[4];
v[5] = hash_h[5];
v[6] = hash_h[6];
v[7] = hash_h[7];
v[8] = blake_iv[0];
v[9] = blake_iv[1];
v[10] = blake_iv[2];
v[11] = blake_iv[3];
v[12] = blake_iv[4] ^ (128 + 16);
v[13] = blake_iv[5];
v[14] = blake_iv[6] ^ 0xffffffffffffffff;
v[15] = blake_iv[7];
// mix 1
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 2
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 3
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, m);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 4
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, m);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 5
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, m);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 6
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], m, 0);
// mix 7
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], m, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 8
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, m);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 9
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], m, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 10
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], m, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 11
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 12
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
v[0] ^= hash_h[0] ^ v[8];
v[1] ^= hash_h[1] ^ v[9];
v[2] ^= hash_h[2] ^ v[10];
v[3] ^= hash_h[3] ^ v[11];
v[4] ^= hash_h[4] ^ v[12];
v[5] ^= hash_h[5] ^ v[13];
v32[12] ^= hash_h32[12] ^ v32[28];
u32 bexor = __byte_perm(v32[0], 0, 0x4012); // first 20 bits
u32 bucketid;
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
u32 slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[0], v32[1], 0x1234);
tt.y = __byte_perm(v32[1], v32[2], 0x1234);
tt.z = __byte_perm(v32[2], v32[3], 0x1234);
tt.w = __byte_perm(v32[3], v32[4], 0x1234);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[4], v32[5], 0x1234);
tt.y = __byte_perm(v32[5], v32[6], 0x1234);
tt.z = 0;
tt.w = block << 1;
*(uint4*)(&s->hash[4]) = tt;
}
bexor = __byte_perm(v32[6], 0, 0x0123);
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[6], v32[7], 0x2345);
tt.y = __byte_perm(v32[7], v32[8], 0x2345);
tt.z = __byte_perm(v32[8], v32[9], 0x2345);
tt.w = __byte_perm(v32[9], v32[10], 0x2345);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[10], v32[11], 0x2345);
tt.y = __byte_perm(v32[11], v32[12], 0x2345);
tt.z = 0;
tt.w = (block << 1) + 1;
*(uint4*)(&s->hash[4]) = tt;
}
}
/*
Functions digit_1 to digit_8 work on the same principle:
Each thread does 2-3 slot loads (loads are coalesced).
The xorwork of the slots is loaded into shared memory and kept in registers (except for digit_1).
At the same time, the restbits (8 or 9 bits) of the xorwork are used for collision detection;
the restbits determine the position in ht.
Pair creation follows. The first one (or two) pairs' xorworks are written to global memory
as soon as possible; the remaining pairs are saved in shared memory (one u32 per pair - two 16-bit indices).
In most cases all threads have one (or two) pairs, so this trick offloads some memory writes from the last step.
In the last step the xorwork of the remaining pairs is written to global memory.
*/
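/*
Illustrative sketch (added note, not part of the original solver; these helpers are
not called by the kernels and their names are made up): the pair encoding used by
digit_1..digit_8. A pair of slot indices is packed into one u32 with
__byte_perm(si, prev, 0x1054), which places the low 16 bits of si in the high half
and the low 16 bits of prev in the low half; selectors 0x4510 and 0x4532 read the
two halves back.
*/
__device__ __forceinline__ u32 pack_pair_example(u32 si, u32 prev)
{
	// result = ((si & 0xFFFF) << 16) | (prev & 0xFFFF)
	return __byte_perm(si, prev, 0x1054);
}
__device__ __forceinline__ void unpack_pair_example(u32 pair, u32 &lo16, u32 &hi16)
{
	lo16 = __byte_perm(pair, 0, 0x4510); // low half (prev above)
	hi16 = __byte_perm(pair, 0, 0x4532); // high half (si above)
}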
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_1(equi<RB, SM>* eq)
{
__shared__ u16 ht[256][SSM - 1];
__shared__ uint2 lastword1[RB8_NSLOTS];
__shared__ uint4 lastword2[RB8_NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < 256)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots0[bucketid], RB8_NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
uint2 ta[2];
uint4 tb[2];
u32 si[2];
#ifdef OPT_SYNC_ALL
// enable this to make shared memory operations fully safe;
// disabling it gains some speed, but can rarely cause a crash
__syncthreads();
#endif
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slot* pslot1 = eq->round0trees[bucketid] + si[i];
// get xhash
uint4 a1 = *(uint4*)(&pslot1->hash[0]);
uint2 a2 = *(uint2*)(&pslot1->hash[4]);
ta[i].x = a1.x;
ta[i].y = a1.y;
lastword1[si[i]] = ta[i];
tb[i].x = a1.z;
tb[i].y = a1.w;
tb[i].z = a2.x;
tb[i].w = a2.y;
lastword2[si[i]] = tb[i];
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(hr[i]) : "r"(ta[i].x));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
int* pairs = ht_len; // reuse ht_len as the pairs array to save shared memory
u32 xors[6];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = ta[i] ^ lastword1[p];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[si[i]] ^ lastword2[p];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, si[i], p, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[i] ^ lastword2[k];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, i, k, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_2(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][SSM - 1];
__shared__ u32 lastword1[NSLOTS];
__shared__ uint4 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slot* buck = eq->trees[0][bucketid];
u32 bsize = umin(eq->edata.nslots[1][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 ta[2];
uint4 tt[2];
u32 si[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
// get slot
const slot* pslot1 = buck + si[i];
uint4 ttx = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = ta[i] = ttx.x;
uint2 tty = *(uint2*)(&pslot1->hash[4]);
tt[i].x = ttx.y;
tt[i].y = ttx.z;
tt[i].z = ttx.w;
tt[i].w = tty.x;
lastword2[si[i]] = tt[i];
hr[i] = tty.y & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[0] = ta[i] ^ lastword1[p];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = tt[i] ^ lastword2[p];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[0] = lastword1[i] ^ lastword1[k];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = lastword2[i] ^ lastword2[k];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_3(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[2][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
u32 ta[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round2trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round2trees[bucketid].treestiny[si[i]];
tt[i] = *(uint4*)(&xs.hash[0]);
lastword1[si[i]] = tt[i];
ta[i] = xst.hash[0];
lastword2[si[i]] = ta[i];
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[4] = ta[i] ^ lastword2[p];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = tt[i] ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[4] = lastword2[i] ^ lastword2[k];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_4(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[3][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round3trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round3trees[bucketid].treestiny[si[i]];
// get xhash
tt[i] = *(uint4*)(&xs.hash[0]);
lastword[si[i]] = tt[i];
hr[i] = xst.hash[0] & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_5(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slotsmall* buck = eq->treessmall[3][bucketid];
u32 bsize = umin(eq->edata.nslots[4][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword[si[i]] = tt[i];
asm("bfe.u32 %0, %1, 4, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = xors[3];
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[1];
tt.y = xors[2];
tt.z = xors[3];
tt.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = tt;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_6(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint2 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[5][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[2][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = *(uint2*)(&tt[i].x);
lastword2[si[i]] = tt[i].z;
asm("bfe.u32 %0, %1, 16, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
// reuse ht_len as the pairs array to save shared memory
int* pairs = ht_len;
__syncthreads();
u32 xors[3];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
u32 pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
xors[2] = lastword2[i] ^ lastword2[k];
if (xors[2] == 0)
continue;
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_7(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[6][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[0][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint4*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].z), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 xorbucketid, xorslot;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[0];
tt.y = xors[1];
tt.z = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
tt.w = 0;
*(uint4*)(&xs.hash[0]) = tt;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_8(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
next_pair = 0;
pairs_len = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[7][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[1][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint2 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint2*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 8, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot >= RB8_NSLOTS_LD) continue;
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
/*
The last-round function is similar to the previous ones but has a different ending.
Warps are used to process the final candidates; each warp processes one candidate.
The first two bidandsids (a u32 holding a bucketid and two slotids) are retrieved by
lane 0 and lane 16, the next four bidandsids by lanes 0, 8, 16 and 24, ... until
all lanes in the warp hold bidandsids from round 4. Next, each thread retrieves
16 indices. While doing so, the indices are compared using atomicExch
to determine whether there are duplicates (tromp's method). At the end, if no
duplicates are found, the candidate solution (all indices) is saved. Note that this
duplicate check is not exact, so CPU-side duplicate checking is still needed afterwards.
*/
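/*
Illustrative sketch (added note, not used by the kernels; the helper name is made up):
the warp-wide duplicate test behind CHECK_DUP below. Each candidate index is hashed
into a small per-warp table keyed by its low DUPBITS bits; atomicExch stores the
remaining high bits and returns the previous occupant. If the previous occupant
already held the same high bits, the same index was seen twice, and __any() lets
every lane of the warp observe the verdict so the whole candidate can be dropped.
The test can miss duplicates when unrelated indices later overwrite the same table
slot, which is why the exact CPU-side duplicate check mentioned above is still required.
*/
__device__ __forceinline__ bool probably_duplicate_example(u32* susp, u32 ind, u32 dupbits)
{
	u32 key = ind & ((1u << dupbits) - 1); // table slot, taken from the low bits
	u32 tag = ind >> dupbits;              // remaining high bits stored in the slot
	return __any(atomicExch(&susp[key], tag) == tag);
}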
template <u32 RB, u32 SM, int SSM, u32 FCT, typename PACKER, u32 MAXPAIRS, u32 DUPBITS, u32 W>
__global__ void digit_last_wdc(equi<RB, SM>* eq)
{
__shared__ u8 shared_data[8192];
int* ht_len = (int*)(&shared_data[0]);
int* pairs = ht_len;
u32* lastword = (u32*)(&shared_data[256 * 4]);
u16* ht = (u16*)(&shared_data[256 * 4 + RB8_NSLOTS_LD * 4]);
u32* pairs_len = (u32*)(&shared_data[8188]);
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
#pragma unroll
for (u32 i = 0; i < FCT; i++)
ht_len[(i * (256 / FCT)) + threadid] = 0;
if (threadid == ((256 / FCT) - 1))
*pairs_len = 0;
slottiny* buck = eq->treestiny[0][bucketid];
u32 bsize = umin(eq->edata.nslots8[bucketid], RB8_NSLOTS_LD);
u32 si[3 * FCT];
u32 hr[3 * FCT];
int pos[3 * FCT];
u32 lw[3 * FCT];
#pragma unroll
for (u32 i = 0; i < (3 * FCT); i++)
pos[i] = SSM;
__syncthreads();
#pragma unroll
for (u32 i = 0; i < (3 * FCT); i++)
{
si[i] = i * (256 / FCT) + threadid;
if (si[i] >= bsize) break;
const slottiny* pslot1 = buck + si[i];
// get xhash
uint2 tt = *(uint2*)(&pslot1->hash[0]);
lw[i] = tt.x;
lastword[si[i]] = lw[i];
u32 a;
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(a) : "r"(lw[i]));
hr[i] = a;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1))
ht[hr[i] * (SSM - 1) + pos[i]] = si[i];
}
__syncthreads();
#pragma unroll
for (u32 i = 0; i < (3 * FCT); i++)
{
if (pos[i] >= SSM) continue;
for (int k = 0; k != pos[i]; ++k)
{
u16 prev = ht[hr[i] * (SSM - 1) + k];
if (lw[i] != lastword[prev]) continue;
u32 pindex = atomicAdd(pairs_len, 1);
if (pindex >= MAXPAIRS) break;
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
__syncthreads();
u32 plen = umin(*pairs_len, 64);
#define CALC_LEVEL(a, b, c, d) { \
u32 plvl = levels[b]; \
u32* bucks = eq->round4bidandsids[PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1]; \
levels[c] = bucks[slot0]; \
}
#define CALC_LEVEL_SMALL(a, b, c, d) { \
u32 plvl = levels[b]; \
slotsmall* bucks = eq->treessmall[a][PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1].hash[d]; \
levels[c] = bucks[slot0].hash[d]; \
}
u32 lane = threadIdx.x & 0x1f;
u32 par = threadIdx.x >> 5;
u32* levels = (u32*)&pairs[MAXPAIRS + (par << DUPBITS)];
u32* susp = levels;
while (par < plen)
{
int pair = pairs[par];
par += W;
if (lane % 16 == 0)
{
u32 plvl;
if (lane == 0) plvl = buck[__byte_perm(pair, 0, 0x4510)].hash[1];
else plvl = buck[__byte_perm(pair, 0, 0x4532)].hash[1];
slotsmall* bucks = eq->treessmall[1][PACKER::get_bucketid(plvl, RB, SM)];
u32 slot1 = PACKER::get_slot1(plvl, RB, SM);
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM);
levels[lane] = bucks[slot1].hash[2];
levels[lane + 8] = bucks[slot0].hash[2];
}
if (lane % 8 == 0)
CALC_LEVEL_SMALL(0, lane, lane + 4, 3);
if (lane % 4 == 0)
CALC_LEVEL_SMALL(2, lane, lane + 2, 3);
if (lane % 2 == 0)
CALC_LEVEL(0, lane, lane + 1, 4);
u32 ind[16];
u32 f1 = levels[lane];
const slottiny* buck_v4 = &eq->round3trees[PACKER::get_bucketid(f1, RB, SM)].treestiny[0];
const u32 slot1_v4 = PACKER::get_slot1(f1, RB, SM);
const u32 slot0_v4 = PACKER::get_slot0(f1, slot1_v4, RB, SM);
susp[lane] = 0xffffffff;
susp[32 + lane] = 0xffffffff;
#define CHECK_DUP(a) \
__any(atomicExch(&susp[(ind[a] & ((1 << DUPBITS) - 1))], (ind[a] >> DUPBITS)) == (ind[a] >> DUPBITS))
u32 f2 = buck_v4[slot1_v4].hash[1];
const slottiny* buck_v3_1 = &eq->round2trees[PACKER::get_bucketid(f2, RB, SM)].treestiny[0];
const u32 slot1_v3_1 = PACKER::get_slot1(f2, RB, SM);
const u32 slot0_v3_1 = PACKER::get_slot0(f2, slot1_v3_1, RB, SM);
susp[64 + lane] = 0xffffffff;
susp[96 + lane] = 0xffffffff;
u32 f0 = buck_v3_1[slot1_v3_1].hash[1];
const slot* buck_v2_1 = eq->trees[0][PACKER::get_bucketid(f0, RB, SM)];
const u32 slot1_v2_1 = PACKER::get_slot1(f0, RB, SM);
const u32 slot0_v2_1 = PACKER::get_slot0(f0, slot1_v2_1, RB, SM);
susp[128 + lane] = 0xffffffff;
susp[160 + lane] = 0xffffffff;
u32 f3 = buck_v2_1[slot1_v2_1].hash[6];
const slot* buck_fin_1 = eq->round0trees[packer_default::get_bucketid(f3, 8, RB8_NSLOTS)];
const u32 slot1_fin_1 = packer_default::get_slot1(f3, 8, RB8_NSLOTS);
const u32 slot0_fin_1 = packer_default::get_slot0(f3, slot1_fin_1, 8, RB8_NSLOTS);
susp[192 + lane] = 0xffffffff;
susp[224 + lane] = 0xffffffff;
ind[0] = buck_fin_1[slot1_fin_1].hash[7];
if (CHECK_DUP(0)) continue;
ind[1] = buck_fin_1[slot0_fin_1].hash[7];
if (CHECK_DUP(1)) continue;
u32 f4 = buck_v2_1[slot0_v2_1].hash[6];
const slot* buck_fin_2 = eq->round0trees[packer_default::get_bucketid(f4, 8, RB8_NSLOTS)];
const u32 slot1_fin_2 = packer_default::get_slot1(f4, 8, RB8_NSLOTS);
const u32 slot0_fin_2 = packer_default::get_slot0(f4, slot1_fin_2, 8, RB8_NSLOTS);
ind[2] = buck_fin_2[slot1_fin_2].hash[7];
if (CHECK_DUP(2)) continue;
ind[3] = buck_fin_2[slot0_fin_2].hash[7];
if (CHECK_DUP(3)) continue;
u32 f5 = buck_v3_1[slot0_v3_1].hash[1];
const slot* buck_v2_2 = eq->trees[0][PACKER::get_bucketid(f5, RB, SM)];
const u32 slot1_v2_2 = PACKER::get_slot1(f5, RB, SM);
const u32 slot0_v2_2 = PACKER::get_slot0(f5, slot1_v2_2, RB, SM);
u32 f6 = buck_v2_2[slot1_v2_2].hash[6];
const slot* buck_fin_3 = eq->round0trees[packer_default::get_bucketid(f6, 8, RB8_NSLOTS)];
const u32 slot1_fin_3 = packer_default::get_slot1(f6, 8, RB8_NSLOTS);
const u32 slot0_fin_3 = packer_default::get_slot0(f6, slot1_fin_3, 8, RB8_NSLOTS);
ind[4] = buck_fin_3[slot1_fin_3].hash[7];
if (CHECK_DUP(4)) continue;
ind[5] = buck_fin_3[slot0_fin_3].hash[7];
if (CHECK_DUP(5)) continue;
u32 f7 = buck_v2_2[slot0_v2_2].hash[6];
const slot* buck_fin_4 = eq->round0trees[packer_default::get_bucketid(f7, 8, RB8_NSLOTS)];
const u32 slot1_fin_4 = packer_default::get_slot1(f7, 8, RB8_NSLOTS);
const u32 slot0_fin_4 = packer_default::get_slot0(f7, slot1_fin_4, 8, RB8_NSLOTS);
ind[6] = buck_fin_4[slot1_fin_4].hash[7];
if (CHECK_DUP(6)) continue;
ind[7] = buck_fin_4[slot0_fin_4].hash[7];
if (CHECK_DUP(7)) continue;
u32 f8 = buck_v4[slot0_v4].hash[1];
const slottiny* buck_v3_2 = &eq->round2trees[PACKER::get_bucketid(f8, RB, SM)].treestiny[0];
const u32 slot1_v3_2 = PACKER::get_slot1(f8, RB, SM);
const u32 slot0_v3_2 = PACKER::get_slot0(f8, slot1_v3_2, RB, SM);
u32 f9 = buck_v3_2[slot1_v3_2].hash[1];
const slot* buck_v2_3 = eq->trees[0][PACKER::get_bucketid(f9, RB, SM)];
const u32 slot1_v2_3 = PACKER::get_slot1(f9, RB, SM);
const u32 slot0_v2_3 = PACKER::get_slot0(f9, slot1_v2_3, RB, SM);
u32 f10 = buck_v2_3[slot1_v2_3].hash[6];
const slot* buck_fin_5 = eq->round0trees[packer_default::get_bucketid(f10, 8, RB8_NSLOTS)];
const u32 slot1_fin_5 = packer_default::get_slot1(f10, 8, RB8_NSLOTS);
const u32 slot0_fin_5 = packer_default::get_slot0(f10, slot1_fin_5, 8, RB8_NSLOTS);
ind[8] = buck_fin_5[slot1_fin_5].hash[7];
if (CHECK_DUP(8)) continue;
ind[9] = buck_fin_5[slot0_fin_5].hash[7];
if (CHECK_DUP(9)) continue;
u32 f11 = buck_v2_3[slot0_v2_3].hash[6];
const slot* buck_fin_6 = eq->round0trees[packer_default::get_bucketid(f11, 8, RB8_NSLOTS)];
const u32 slot1_fin_6 = packer_default::get_slot1(f11, 8, RB8_NSLOTS);
const u32 slot0_fin_6 = packer_default::get_slot0(f11, slot1_fin_6, 8, RB8_NSLOTS);
ind[10] = buck_fin_6[slot1_fin_6].hash[7];
if (CHECK_DUP(10)) continue;
ind[11] = buck_fin_6[slot0_fin_6].hash[7];
if (CHECK_DUP(11)) continue;
u32 f12 = buck_v3_2[slot0_v3_2].hash[1];
const slot* buck_v2_4 = eq->trees[0][PACKER::get_bucketid(f12, RB, SM)];
const u32 slot1_v2_4 = PACKER::get_slot1(f12, RB, SM);
const u32 slot0_v2_4 = PACKER::get_slot0(f12, slot1_v2_4, RB, SM);
u32 f13 = buck_v2_4[slot1_v2_4].hash[6];
const slot* buck_fin_7 = eq->round0trees[packer_default::get_bucketid(f13, 8, RB8_NSLOTS)];
const u32 slot1_fin_7 = packer_default::get_slot1(f13, 8, RB8_NSLOTS);
const u32 slot0_fin_7 = packer_default::get_slot0(f13, slot1_fin_7, 8, RB8_NSLOTS);
ind[12] = buck_fin_7[slot1_fin_7].hash[7];
if (CHECK_DUP(12)) continue;
ind[13] = buck_fin_7[slot0_fin_7].hash[7];
if (CHECK_DUP(13)) continue;
u32 f14 = buck_v2_4[slot0_v2_4].hash[6];
const slot* buck_fin_8 = eq->round0trees[packer_default::get_bucketid(f14, 8, RB8_NSLOTS)];
const u32 slot1_fin_8 = packer_default::get_slot1(f14, 8, RB8_NSLOTS);
const u32 slot0_fin_8 = packer_default::get_slot0(f14, slot1_fin_8, 8, RB8_NSLOTS);
ind[14] = buck_fin_8[slot1_fin_8].hash[7];
if (CHECK_DUP(14)) continue;
ind[15] = buck_fin_8[slot0_fin_8].hash[7];
if (CHECK_DUP(15)) continue;
u32 soli;
if (lane == 0) {
soli = atomicAdd(&eq->edata.srealcont.nsols, 1);
}
#if __CUDA_ARCH__ >= 300
// all threads get the value from lane 0
soli = __shfl2(soli, 0);
#else
__syncthreads();
soli = eq->edata.srealcont.nsols;
#endif
if (soli < MAXREALSOLS)
{
u32 pos = lane << 4;
*(uint4*)(&eq->edata.srealcont.sols[soli][pos ]) = *(uint4*)(&ind[ 0]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 4]) = *(uint4*)(&ind[ 4]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 8]) = *(uint4*)(&ind[ 8]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 12]) = *(uint4*)(&ind[12]);
}
}
}
//std::mutex dev_init;
int dev_init_done[MAX_GPUS] = { 0 };
__host__
static int compu32(const void *pa, const void *pb)
{
uint32_t a = *(uint32_t *)pa, b = *(uint32_t *)pb;
return a<b ? -1 : a == b ? 0 : +1;
}
__host__
static bool duped(uint32_t* prf)
{
uint32_t sortprf[512];
memcpy(sortprf, prf, sizeof(uint32_t) * 512);
qsort(sortprf, 512, sizeof(uint32_t), &compu32);
for (uint32_t i = 1; i<512; i++) {
if (sortprf[i] <= sortprf[i - 1])
return true;
}
return false;
}
__host__
static void sort_pair(uint32_t *a, uint32_t len)
{
uint32_t *b = a + len;
uint32_t tmp, need_sorting = 0;
for (uint32_t i = 0; i < len; i++) {
if (need_sorting || a[i] > b[i])
{
need_sorting = 1;
tmp = a[i];
a[i] = b[i];
b[i] = tmp;
}
else if (a[i] < b[i])
return;
}
}
__host__
static void setheader(blake2b_state *ctx, const char *header, const u32 headerLen, const char* nce, const u32 nonceLen)
{
uint32_t le_N = WN;
uint32_t le_K = WK;
uchar personal[] = "ZcashPoW01230123";
memcpy(personal + 8, &le_N, 4);
memcpy(personal + 12, &le_K, 4);
blake2b_param P[1];
P->digest_length = HASHOUT;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
P->node_offset = 0;
P->node_depth = 0;
P->inner_length = 0;
memset(P->reserved, 0, sizeof(P->reserved));
memset(P->salt, 0, sizeof(P->salt));
memcpy(P->personal, (const uint8_t *)personal, 16);
eq_blake2b_init_param(ctx, P);
eq_blake2b_update(ctx, (const uchar *)header, headerLen);
if (nonceLen) eq_blake2b_update(ctx, (const uchar *)nce, nonceLen);
}
#ifdef WIN32
typedef hipError_t(CUDAAPI *dec_cuDeviceGet)(hipDevice_t*, int);
typedef hipError_t(CUDAAPI *dec_cuCtxCreate)(hipCtx_t*, unsigned int, hipDevice_t);
typedef hipError_t(CUDAAPI *dec_cuCtxPushCurrent)(hipCtx_t);
typedef hipError_t(CUDAAPI *dec_cuCtxDestroy)(hipCtx_t);
dec_cuDeviceGet _cuDeviceGet = nullptr;
dec_cuCtxCreate _cuCtxCreate = nullptr;
dec_cuCtxPushCurrent _cuCtxPushCurrent = nullptr;
dec_cuCtxDestroy _cuCtxDestroy = nullptr;
#endif
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::eq_cuda_context(int thr_id, int dev_id, fn_validate validate, fn_cancel cancel)
{
m_fnValidate = validate;
m_fnCancel = cancel;
thread_id = thr_id;
device_id = dev_id;
solutions = nullptr;
equi_mem_sz = sizeof(equi<RB, SM>);
throughput = NBLOCKS;
totalblocks = NBLOCKS/FD_THREADS;
threadsperblock = FD_THREADS;
threadsperblock_digits = THREADS;
//dev_init.lock();
if (!dev_init_done[device_id])
{
// only first thread shall init device
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
pctx = nullptr;
}
else
{
// create new context
hipDevice_t dev;
#ifdef WIN32
if (_cuDeviceGet == nullptr)
{
HMODULE hmod = LoadLibraryA("nvcuda.dll");
if (hmod == NULL)
throw std::runtime_error("Failed to load nvcuda.dll");
_cuDeviceGet = (dec_cuDeviceGet)GetProcAddress(hmod, "hipDeviceGet");
if (_cuDeviceGet == nullptr)
throw std::runtime_error("Failed to get hipDeviceGet address");
_cuCtxCreate = (dec_cuCtxCreate)GetProcAddress(hmod, "hipCtxCreate");
if (_cuCtxCreate == nullptr)
throw std::runtime_error("Failed to get hipCtxCreate address");
_cuCtxPushCurrent = (dec_cuCtxPushCurrent)GetProcAddress(hmod, "hipCtxPushCurrent");
if (_cuCtxPushCurrent == nullptr)
throw std::runtime_error("Failed to get hipCtxPushCurrent address");
_cuCtxDestroy = (dec_cuCtxDestroy)GetProcAddress(hmod, "hipCtxDestroy");
if (_cuCtxDestroy == nullptr)
throw std::runtime_error("Failed to get hipCtxDestroy address");
}
checkCudaDriverErrors(_cuDeviceGet(&dev, device_id));
checkCudaDriverErrors(_cuCtxCreate(&pctx, HIP_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(_cuCtxPushCurrent(pctx));
#else
checkCudaDriverErrors(hipDeviceGet(&dev, device_id));
checkCudaDriverErrors(hipCtxCreate(&pctx, HIP_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(hipCtxPushCurrent(pctx));
#endif
}
++dev_init_done[device_id];
//dev_init.unlock();
if (hipMalloc((void**)&device_eq, equi_mem_sz) != hipSuccess)
throw std::runtime_error("CUDA: failed to alloc memory");
solutions = (scontainerreal*) malloc(sizeof(scontainerreal));
if (!solutions)
throw std::runtime_error("EOM: failed to alloc solutions memory");
}
std::vector<unsigned char> GetMinimalFromIndices(std::vector<uint32_t> indices,
size_t cBitLen);
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ bool eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::solve(
unsigned char *pblock,
unsigned char *header,
unsigned int headerlen)
{
blake2b_state blake_ctx;
int blocks = NBUCKETS;
setheader(&blake_ctx, (const char *)header, 140-32, (const char *)header+140-32, 32);
// todo: improve
// the djezo solver allows the last 4 bytes of the nonce to be iterated;
// this can be used to create an internal loop - compute the initial blake hash only once, then load 8*8 bytes on the device (blake state h)
// and just iterate nn++
// less CPU load, one hipMemcpy less -> faster
//u32 nn = *(u32*)&nonce[28];
u32 nn = 0;
checkCudaErrors(hipMemcpy(&device_eq->blake_h, &blake_ctx.h, sizeof(u64) * 8, hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(&device_eq->edata, 0, sizeof(device_eq->edata)));
hipLaunchKernelGGL((digit_first<RB, SM, PACKER>), dim3(NBLOCKS / FD_THREADS), dim3(FD_THREADS), 0, 0, device_eq, nn);
hipLaunchKernelGGL((digit_1<RB, SM, SSM, PACKER, 4 * NRESTS, 512>), dim3(4096), dim3(512), 0, 0, device_eq);
hipLaunchKernelGGL((digit_2<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS>), dim3(blocks), dim3(THREADS), 0, 0, device_eq);
hipLaunchKernelGGL((digit_3<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS>), dim3(blocks), dim3(THREADS), 0, 0, device_eq);
if (m_fnCancel()) return false;
hipLaunchKernelGGL((digit_4<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS>), dim3(blocks), dim3(THREADS), 0, 0, device_eq);
hipLaunchKernelGGL((digit_5<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS>), dim3(blocks), dim3(THREADS), 0, 0, device_eq);
hipLaunchKernelGGL((digit_6<RB, SM, SSM - 1, PACKER, 3 * NRESTS>), dim3(blocks), dim3(NRESTS), 0, 0, device_eq);
hipLaunchKernelGGL((digit_7<RB, SM, SSM - 1, PACKER, 3 * NRESTS>), dim3(blocks), dim3(NRESTS), 0, 0, device_eq);
hipLaunchKernelGGL((digit_8<RB, SM, SSM - 1, PACKER, 3 * NRESTS>), dim3(blocks), dim3(NRESTS), 0, 0, device_eq);
hipLaunchKernelGGL((digit_last_wdc<RB, SM, SSM - 3, 2, PACKER, 64, 8, 4>), dim3(4096), dim3(256 / 2), 0, 0, device_eq);
checkCudaErrors(hipMemcpy(solutions, &device_eq->edata.srealcont, (MAXREALSOLS * (512 * 4)) + 4, hipMemcpyDeviceToHost));
for (u32 s = 0; (s < solutions->nsols) && (s < MAXREALSOLS); s++)
{
// remove dups on CPU (dup removal on GPU is not fully exact and can let some invalid solutions through)
if (duped(solutions->sols[s])) continue;
// canonicalize the solution: sort sibling index groups pairwise at every level
for (uint32_t level = 0; level < 9; level++)
for (uint32_t i = 0; i < (1 << 9); i += (2 << level))
sort_pair(&solutions->sols[s][i], 1 << level);
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions->sols[s][i];
}
std::vector<unsigned char> sol_char = GetMinimalFromIndices(index_vector, DIGITBITS);
if (m_fnValidate(sol_char, pblock, thread_id))
{
// If we find a PoW solution, do not try the remaining candidates,
// because they become invalid once a new block has been created in the blockchain.
return true;
}
}
return false;
}
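/*
Hypothetical usage sketch (not part of the original sources): how a mining thread
might drive this context. The fn_validate/fn_cancel callback types come from the
constructor above; my_validate, my_cancel, thr_id, dev_id, pblock and the header
buffer are placeholders for illustration only.

	eq_cuda_context<CONFIG_MODE_1> ctx(thr_id, dev_id, my_validate, my_cancel);
	unsigned char header[140]; // 108-byte block header followed by a 32-byte nonce
	while (!my_cancel()) {
		// bump the trailing nonce bytes inside header here, then:
		if (ctx.solve(pblock, header, sizeof(header)))
			break; // a solution was accepted by my_validate
	}
*/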
// destructor
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__
void eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::freemem()
{
if (solutions)
free(solutions);
if (device_eq) {
hipFree(device_eq);
device_eq = NULL;
}
if (pctx) {
// non primary thread, destroy context
#ifdef WIN32
checkCudaDriverErrors(_cuCtxDestroy(pctx));
#else
checkCudaDriverErrors(hipCtxDestroy(pctx));
#endif
} else {
checkCudaErrors(hipDeviceReset());
dev_init_done[device_id] = 0;
}
}
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__
eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::~eq_cuda_context()
{
freemem();
}
#ifdef CONFIG_MODE_1
template class eq_cuda_context<CONFIG_MODE_1>;
#endif
#ifdef CONFIG_MODE_2
template class eq_cuda_context<CONFIG_MODE_2>;
#endif
#ifdef CONFIG_MODE_3
template class eq_cuda_context<CONFIG_MODE_3>;
#endif
| c2f5e158028ecdcaa1539d5b2a038e284b646df2.cu | /*
* Equihash solver created by djeZo ([email protected]) for NiceHash
* Adapted to be more compatible with older C++ compilers
*
* cuda_djezo solver was released by NiceHash (www.nicehash.com) under
* GPL 3.0 license. If you don't have a copy, you can obtain one from
* https://www.gnu.org/licenses/gpl-3.0.txt
*
* Based on CUDA solver by John Tromp released under MIT license.
* Some helper functions taken out of OpenCL solver by Marc Bevand
* released under MIT license.
*
* Copyright (c) 2016 John Tromp, Marc Bevand
* Copyright (c) 2017 djeZo, Tanguy Pruvot (GPL v3)
*/
#ifdef WIN32
#include <Windows.h>
#endif
#include <stdio.h>
#include <vector>
//#include <mutex>
#include "equihash.h"
#include "eqcuda.hpp" // eq_cuda_context
#include "blake2/blake2.h"
//#define WN 200
//#define WK 9
#ifndef MAX_GPUS
#define MAX_GPUS 16
#endif
#define NDIGITS (WK+1)
#define DIGITBITS (WN/(NDIGITS))
#define PROOFSIZE (1<<WK)
#define BASE (1<<DIGITBITS)
#define NHASHES (2*BASE)
#define HASHESPERBLAKE (512/WN)
#define HASHOUT (HASHESPERBLAKE*WN/8)
#define NBLOCKS ((NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE)
#define BUCKBITS (DIGITBITS - RB)
#define NBUCKETS (1 << BUCKBITS)
#define BUCKMASK (NBUCKETS - 1)
#define SLOTBITS (RB + 2)
#define SLOTRANGE (1 << SLOTBITS)
#define NSLOTS SM
#define SLOTMASK (SLOTRANGE - 1)
#define NRESTS (1 << RB)
#define RESTMASK (NRESTS - 1)
#define CANTORBITS (2 * SLOTBITS - 2)
#define CANTORMASK ((1 << CANTORBITS) - 1)
#define CANTORMAXSQRT (2 * NSLOTS)
#define RB8_NSLOTS 640
#define RB8_NSLOTS_LD 624
#define FD_THREADS 128
#ifdef __INTELLISENSE__
// reduce vstudio editor warnings
#include <device_functions.h>
#include <device_launch_parameters.h>
#define __launch_bounds__(max_tpb, min_blocks)
#define __CUDA_ARCH__ 520
uint32_t __byte_perm(uint32_t x, uint32_t y, uint32_t z);
uint32_t __shfl2(uint32_t x, uint32_t y);
uint32_t __shfl_sync(uint32_t mask, uint32_t x, uint32_t y);
uint32_t atomicExch(uint32_t *x, uint32_t y);
uint32_t atomicAdd(uint32_t *x, uint32_t y);
void __syncthreads(void);
void __threadfence(void);
void __threadfence_block(void);
uint32_t __ldg(const uint32_t* address);
uint64_t __ldg(const uint64_t* address);
uint4 __ldca(const uint4 *ptr);
u32 __ldca(const u32 *ptr);
u32 umin(const u32, const u32);
u32 umax(const u32, const u32);
#endif
#define OPT_SYNC_ALL
#if CUDA_VERSION >= 9000 && __CUDA_ARCH__ >= 300
#define __shfl2(var, srcLane) __shfl_sync(0xFFFFFFFFu, var, srcLane)
#undef __any
#define __any(p) __any_sync(0xFFFFFFFFu, p)
#else
#define __shfl2 __shfl
#endif
typedef u32 proof[PROOFSIZE];
struct __align__(32) slot {
u32 hash[8];
};
struct __align__(16) slotsmall {
u32 hash[4];
};
struct __align__(8) slottiny {
u32 hash[2];
};
template <u32 RB, u32 SM>
struct equi
{
slot round0trees[4096][RB8_NSLOTS];
slot trees[1][NBUCKETS][NSLOTS];
struct {
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round2trees[NBUCKETS];
struct {
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round3trees[NBUCKETS];
slotsmall treessmall[4][NBUCKETS][NSLOTS];
slottiny treestiny[1][4096][RB8_NSLOTS_LD];
u32 round4bidandsids[NBUCKETS][NSLOTS];
union {
u64 blake_h[8];
u32 blake_h32[16];
};
struct {
u32 nslots8[4096];
u32 nslots0[4096];
u32 nslots[9][NBUCKETS];
scontainerreal srealcont;
} edata;
};
// todo: use cuda_helper.h and/or cuda_vector.h
__device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b)
{
return make_uint2(a.x ^ b.x, a.y ^ b.y);
}
__device__ __forceinline__ uint4 operator^ (uint4 a, uint4 b)
{
return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
// for ROR 63 (or ROL 1); this func only supports (32 <= offset < 64)
__device__ __forceinline__ uint2 ROR2(const uint2 a, const int offset)
{
uint2 result;
#if __CUDA_ARCH__ > 300
{
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
#else
result.y = ((a.x >> (offset - 32)) | (a.y << (64 - offset)));
result.x = ((a.y >> (offset - 32)) | (a.x << (64 - offset)));
#endif
return result;
}
__device__ __forceinline__ uint2 SWAPUINT2(uint2 value)
{
return make_uint2(value.y, value.x);
}
__device__ __forceinline__ uint2 ROR24(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x2107);
result.y = __byte_perm(a.y, a.x, 0x6543);
return result;
}
__device__ __forceinline__ uint2 ROR16(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x1076);
result.y = __byte_perm(a.y, a.x, 0x5432);
return result;
}
__device__ __forceinline__ void G2(u64 & a, u64 & b, u64 & c, u64 & d, u64 x, u64 y)
{
a = a + b + x;
((uint2*)&d)[0] = SWAPUINT2(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR24(((uint2*)&b)[0] ^ ((uint2*)&c)[0]);
a = a + b + y;
((uint2*)&d)[0] = ROR16(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR2(((uint2*)&b)[0] ^ ((uint2*)&c)[0], 63U);
}
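// Host/device reference helper (illustrative assumption, not used by the kernels):
// ROR24, ROR16 and ROR2(x, 63) above are just 64-bit right-rotations expressed on a
// (lo, hi) uint2 pair so the compiler can emit byte-permute / funnel-shift
// instructions. This plain version shows the equivalent operation for 0 < r < 64.
__host__ __device__ static __forceinline__ u64 ror64_reference(u64 x, unsigned r)
{
	return (x >> r) | (x << (64 - r));
}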
// untested..
struct packer_default
{
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (((bucketid << SLOTBITS) | s0) << SLOTBITS) | s1;
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
// BUCKMASK-ed to prevent illegal memory accesses in case of memory errors
return (bid >> (2 * SLOTBITS)) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return bid & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> SLOTBITS) & SLOTMASK;
}
};
struct packer_cantor
{
__device__ __forceinline__ static u32 cantor(const u32 s0, const u32 s1)
{
u32 a = umax(s0, s1);
u32 b = umin(s0, s1);
return a * (a + 1) / 2 + b;
}
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (bucketid << CANTORBITS) | cantor(s0, s1);
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> CANTORBITS) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return ((bid & CANTORMASK) - cantor(0, s1)) & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
u32 k, q, sqr = 8 * (bid & CANTORMASK) + 1;
// this k=sqrt(sqr) computing loop averages 3.4 iterations out of maximum 9
for (k = CANTORMAXSQRT; (q = sqr / k) < k; k = (k + q) / 2);
return ((k - 1) / 2) & SLOTMASK;
}
};
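// Host-side sketch (illustrative assumption, not used by the solver): round-trip of the
// Cantor pairing used by packer_cantor. Two slot ids smaller than nslots are packed into
// one value and recovered with the same integer-sqrt iteration as get_slot1 above.
__host__ static inline void cantor_roundtrip_example(u32 s0, u32 s1, u32 nslots, u32* out_small, u32* out_big)
{
	u32 a = s0 > s1 ? s0 : s1;
	u32 b = s0 > s1 ? s1 : s0;
	u32 packed = a * (a + 1) / 2 + b;                        // same formula as cantor(s0, s1)
	u32 k, q, sqr = 8 * packed + 1;                          // invert via integer sqrt of 8*packed+1
	for (k = 2 * nslots; (q = sqr / k) < k; k = (k + q) / 2);
	*out_big = (k - 1) / 2;                                  // larger slot id, as in get_slot1
	*out_small = packed - (*out_big) * (*out_big + 1) / 2;   // smaller slot id, as in get_slot0
}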
__device__ __constant__ const u64 blake_iv[] = {
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
#if CUDART_VERSION < 8000 || !defined(__ldca)
#define __ldca(ptr) *(ptr)
#endif
template <u32 RB, u32 SM, typename PACKER>
__global__ void digit_first(equi<RB, SM>* eq, u32 nonce)
{
const u32 block = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ u64 hash_h[8];
u32* hash_h32 = (u32*)hash_h;
if (threadIdx.x < 16)
hash_h32[threadIdx.x] = __ldca(&eq->blake_h32[threadIdx.x]);
__syncthreads();
u64 m = (u64)block << 32 | (u64)nonce;
union
{
u64 v[16];
u32 v32[32];
uint4 v128[8];
};
v[0] = hash_h[0];
v[1] = hash_h[1];
v[2] = hash_h[2];
v[3] = hash_h[3];
v[4] = hash_h[4];
v[5] = hash_h[5];
v[6] = hash_h[6];
v[7] = hash_h[7];
v[8] = blake_iv[0];
v[9] = blake_iv[1];
v[10] = blake_iv[2];
v[11] = blake_iv[3];
v[12] = blake_iv[4] ^ (128 + 16);
v[13] = blake_iv[5];
v[14] = blake_iv[6] ^ 0xffffffffffffffff;
v[15] = blake_iv[7];
// mix 1
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 2
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 3
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, m);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 4
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, m);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 5
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, m);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 6
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], m, 0);
// mix 7
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], m, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 8
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, m);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 9
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], m, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 10
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], m, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 11
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 12
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
v[0] ^= hash_h[0] ^ v[8];
v[1] ^= hash_h[1] ^ v[9];
v[2] ^= hash_h[2] ^ v[10];
v[3] ^= hash_h[3] ^ v[11];
v[4] ^= hash_h[4] ^ v[12];
v[5] ^= hash_h[5] ^ v[13];
v32[12] ^= hash_h32[12] ^ v32[28];
u32 bexor = __byte_perm(v32[0], 0, 0x4012); // first 20 bits
u32 bucketid;
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
u32 slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[0], v32[1], 0x1234);
tt.y = __byte_perm(v32[1], v32[2], 0x1234);
tt.z = __byte_perm(v32[2], v32[3], 0x1234);
tt.w = __byte_perm(v32[3], v32[4], 0x1234);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[4], v32[5], 0x1234);
tt.y = __byte_perm(v32[5], v32[6], 0x1234);
tt.z = 0;
tt.w = block << 1;
*(uint4*)(&s->hash[4]) = tt;
}
bexor = __byte_perm(v32[6], 0, 0x0123);
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[6], v32[7], 0x2345);
tt.y = __byte_perm(v32[7], v32[8], 0x2345);
tt.z = __byte_perm(v32[8], v32[9], 0x2345);
tt.w = __byte_perm(v32[9], v32[10], 0x2345);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[10], v32[11], 0x2345);
tt.y = __byte_perm(v32[11], v32[12], 0x2345);
tt.z = 0;
tt.w = (block << 1) + 1;
*(uint4*)(&s->hash[4]) = tt;
}
}
/*
Functions digit_1 to digit_8 work by the same principle:
each thread does 2-3 slot loads (the loads are coalesced).
The xorwork of the slots is loaded into shared memory and kept in registers (except for digit_1).
At the same time, the restbits (8 or 9 bits) of the xorwork are used to detect collisions;
the restbits determine the position in ht.
Pair creation follows. The first one (or two) pairs' xorworks are written to global memory
as soon as possible; the remaining pairs are saved in shared memory (one u32 per pair - two 16-bit indices).
In most cases every thread has one (or two) pairs, so this trick offloads some memory writes from the last step.
In the last step the xorwork of the remaining pairs is written to memory.
A minimal host-side sketch of this bucketing idea follows below.
*/
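// Minimal host-side sketch (assumption, not part of the solver) of the bucketing idea
// described above: slots are grouped by their restbits and every pair inside a group is
// a collision candidate. The kernels below do exactly this, but in shared memory, with
// atomics, and with pairs packed as two 16-bit slot indices per u32.
__host__ static inline u32 count_collision_pairs_example(const u32* restbits, u32 nslots, u32 nrests)
{
	u32 pairs = 0;
	for (u32 r = 0; r < nrests; r++) {
		u32 cnt = 0;
		for (u32 i = 0; i < nslots; i++)
			if (restbits[i] == r) cnt++;
		if (cnt > 1) pairs += cnt * (cnt - 1) / 2; // a group of cnt slots yields cnt*(cnt-1)/2 candidate pairs
	}
	return pairs;
}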
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_1(equi<RB, SM>* eq)
{
__shared__ u16 ht[256][SSM - 1];
__shared__ uint2 lastword1[RB8_NSLOTS];
__shared__ uint4 lastword2[RB8_NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < 256)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots0[bucketid], RB8_NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
uint2 ta[2];
uint4 tb[2];
u32 si[2];
#ifdef OPT_SYNC_ALL
// enable this to make the shared memory operations fully safe;
// disabling it gains some speed, but can rarely cause a crash
__syncthreads();
#endif
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slot* pslot1 = eq->round0trees[bucketid] + si[i];
// get xhash
uint4 a1 = *(uint4*)(&pslot1->hash[0]);
uint2 a2 = *(uint2*)(&pslot1->hash[4]);
ta[i].x = a1.x;
ta[i].y = a1.y;
lastword1[si[i]] = ta[i];
tb[i].x = a1.z;
tb[i].y = a1.w;
tb[i].z = a2.x;
tb[i].w = a2.y;
lastword2[si[i]] = tb[i];
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(hr[i]) : "r"(ta[i].x));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
int* pairs = ht_len;
u32 xors[6];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = ta[i] ^ lastword1[p];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[si[i]] ^ lastword2[p];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, si[i], p, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[i] ^ lastword2[k];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, i, k, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_2(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][SSM - 1];
__shared__ u32 lastword1[NSLOTS];
__shared__ uint4 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slot* buck = eq->trees[0][bucketid];
u32 bsize = umin(eq->edata.nslots[1][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 ta[2];
uint4 tt[2];
u32 si[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
// get slot
const slot* pslot1 = buck + si[i];
uint4 ttx = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = ta[i] = ttx.x;
uint2 tty = *(uint2*)(&pslot1->hash[4]);
tt[i].x = ttx.y;
tt[i].y = ttx.z;
tt[i].z = ttx.w;
tt[i].w = tty.x;
lastword2[si[i]] = tt[i];
hr[i] = tty.y & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[0] = ta[i] ^ lastword1[p];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = tt[i] ^ lastword2[p];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[0] = lastword1[i] ^ lastword1[k];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = lastword2[i] ^ lastword2[k];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_3(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[2][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
u32 ta[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round2trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round2trees[bucketid].treestiny[si[i]];
tt[i] = *(uint4*)(&xs.hash[0]);
lastword1[si[i]] = tt[i];
ta[i] = xst.hash[0];
lastword2[si[i]] = ta[i];
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[4] = ta[i] ^ lastword2[p];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = tt[i] ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[4] = lastword2[i] ^ lastword2[k];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_4(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[3][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round3trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round3trees[bucketid].treestiny[si[i]];
// get xhash
tt[i] = *(uint4*)(&xs.hash[0]);
lastword[si[i]] = tt[i];
hr[i] = xst.hash[0] & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_5(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slotsmall* buck = eq->treessmall[3][bucketid];
u32 bsize = umin(eq->edata.nslots[4][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
#ifdef OPT_SYNC_ALL
__syncthreads();
#endif
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword[si[i]] = tt[i];
asm("bfe.u32 %0, %1, 4, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 2
for (u32 i = 0; i < 2; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = xors[3];
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[1];
tt.y = xors[2];
tt.z = xors[3];
tt.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = tt;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_6(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint2 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[5][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[2][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = *(uint2*)(&tt[i].x);
lastword2[si[i]] = tt[i].z;
asm("bfe.u32 %0, %1, 16, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
// doing this to save shared memory
int* pairs = ht_len;
__syncthreads();
u32 xors[3];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
u32 pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
xors[2] = lastword2[i] ^ lastword2[k];
if (xors[2] == 0)
continue;
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_7(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[6][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[0][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint4*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].z), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 xorbucketid, xorslot;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[0];
tt.y = xors[1];
tt.z = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
tt.w = 0;
*(uint4*)(&xs.hash[0]) = tt;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_8(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
next_pair = 0;
pairs_len = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[7][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[1][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint2 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint2*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 8, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 bexor, xorbucketid, xorslot;
#pragma unroll 3
for (u32 i = 0; i < 3; i++)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot >= RB8_NSLOTS_LD) continue;
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
/*
The last-round function is similar to the previous ones but has a different ending.
We use warps to process the final candidates; each warp processes one candidate.
The first two bidandsids (a u32 holding a bucketid and two slot ids) are retrieved by
lane 0 and lane 16, the next four bidandsids by lanes 0, 8, 16 and 24, ... until
all lanes in the warp have bidandsids from round 4. Next, each thread retrieves
16 indices. While doing so, the indices are compared using atomicExch
to determine whether there are duplicates (Tromp's method). At the end, if no
duplicates are found, the candidate solution (all indices) is saved. Note that this
dup check is not exact, so CPU dup checking is still needed afterwards;
a host-side analogue of the check follows below.
*/
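// Host-side analogue (illustrative assumption) of the probabilistic duplicate check used in
// digit_last_wdc below: each index is placed into a small table addressed by its low DUPBITS
// bits and tagged with the remaining bits; seeing the same tag twice in one slot means the
// same index appeared twice. A real duplicate can still be missed if its slot is overwritten
// by a different index in between, which is why duped() re-checks every candidate on the CPU.
__host__ static inline bool probably_has_dup_example(const u32* ind, u32 n, u32 dupbits /* <= 10 in this sketch */)
{
	u32 table[1 << 10];
	const u32 mask = (1u << dupbits) - 1;
	for (u32 i = 0; i <= mask; i++) table[i] = 0xffffffff;
	for (u32 i = 0; i < n; i++) {
		u32 slot = ind[i] & mask;
		u32 tag = ind[i] >> dupbits;
		if (table[slot] == tag) return true; // identical index seen before
		table[slot] = tag;
	}
	return false;
}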
template <u32 RB, u32 SM, int SSM, u32 FCT, typename PACKER, u32 MAXPAIRS, u32 DUPBITS, u32 W>
__global__ void digit_last_wdc(equi<RB, SM>* eq)
{
__shared__ u8 shared_data[8192];
int* ht_len = (int*)(&shared_data[0]);
int* pairs = ht_len;
u32* lastword = (u32*)(&shared_data[256 * 4]);
u16* ht = (u16*)(&shared_data[256 * 4 + RB8_NSLOTS_LD * 4]);
u32* pairs_len = (u32*)(&shared_data[8188]);
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
#pragma unroll
for (u32 i = 0; i < FCT; i++)
ht_len[(i * (256 / FCT)) + threadid] = 0;
if (threadid == ((256 / FCT) - 1))
*pairs_len = 0;
slottiny* buck = eq->treestiny[0][bucketid];
u32 bsize = umin(eq->edata.nslots8[bucketid], RB8_NSLOTS_LD);
u32 si[3 * FCT];
u32 hr[3 * FCT];
int pos[3 * FCT];
u32 lw[3 * FCT];
#pragma unroll
for (u32 i = 0; i < (3 * FCT); i++)
pos[i] = SSM;
__syncthreads();
#pragma unroll
for (u32 i = 0; i < (3 * FCT); i++)
{
si[i] = i * (256 / FCT) + threadid;
if (si[i] >= bsize) break;
const slottiny* pslot1 = buck + si[i];
// get xhash
uint2 tt = *(uint2*)(&pslot1->hash[0]);
lw[i] = tt.x;
lastword[si[i]] = lw[i];
u32 a;
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(a) : "r"(lw[i]));
hr[i] = a;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1))
ht[hr[i] * (SSM - 1) + pos[i]] = si[i];
}
__syncthreads();
#pragma unroll
for (u32 i = 0; i < (3 * FCT); i++)
{
if (pos[i] >= SSM) continue;
for (int k = 0; k != pos[i]; ++k)
{
u16 prev = ht[hr[i] * (SSM - 1) + k];
if (lw[i] != lastword[prev]) continue;
u32 pindex = atomicAdd(pairs_len, 1);
if (pindex >= MAXPAIRS) break;
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
__syncthreads();
u32 plen = umin(*pairs_len, 64);
#define CALC_LEVEL(a, b, c, d) { \
u32 plvl = levels[b]; \
u32* bucks = eq->round4bidandsids[PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1]; \
levels[c] = bucks[slot0]; \
}
#define CALC_LEVEL_SMALL(a, b, c, d) { \
u32 plvl = levels[b]; \
slotsmall* bucks = eq->treessmall[a][PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1].hash[d]; \
levels[c] = bucks[slot0].hash[d]; \
}
u32 lane = threadIdx.x & 0x1f;
u32 par = threadIdx.x >> 5;
u32* levels = (u32*)&pairs[MAXPAIRS + (par << DUPBITS)];
u32* susp = levels;
while (par < plen)
{
int pair = pairs[par];
par += W;
if (lane % 16 == 0)
{
u32 plvl;
if (lane == 0) plvl = buck[__byte_perm(pair, 0, 0x4510)].hash[1];
else plvl = buck[__byte_perm(pair, 0, 0x4532)].hash[1];
slotsmall* bucks = eq->treessmall[1][PACKER::get_bucketid(plvl, RB, SM)];
u32 slot1 = PACKER::get_slot1(plvl, RB, SM);
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM);
levels[lane] = bucks[slot1].hash[2];
levels[lane + 8] = bucks[slot0].hash[2];
}
if (lane % 8 == 0)
CALC_LEVEL_SMALL(0, lane, lane + 4, 3);
if (lane % 4 == 0)
CALC_LEVEL_SMALL(2, lane, lane + 2, 3);
if (lane % 2 == 0)
CALC_LEVEL(0, lane, lane + 1, 4);
u32 ind[16];
u32 f1 = levels[lane];
const slottiny* buck_v4 = &eq->round3trees[PACKER::get_bucketid(f1, RB, SM)].treestiny[0];
const u32 slot1_v4 = PACKER::get_slot1(f1, RB, SM);
const u32 slot0_v4 = PACKER::get_slot0(f1, slot1_v4, RB, SM);
susp[lane] = 0xffffffff;
susp[32 + lane] = 0xffffffff;
#define CHECK_DUP(a) \
__any(atomicExch(&susp[(ind[a] & ((1 << DUPBITS) - 1))], (ind[a] >> DUPBITS)) == (ind[a] >> DUPBITS))
u32 f2 = buck_v4[slot1_v4].hash[1];
const slottiny* buck_v3_1 = &eq->round2trees[PACKER::get_bucketid(f2, RB, SM)].treestiny[0];
const u32 slot1_v3_1 = PACKER::get_slot1(f2, RB, SM);
const u32 slot0_v3_1 = PACKER::get_slot0(f2, slot1_v3_1, RB, SM);
susp[64 + lane] = 0xffffffff;
susp[96 + lane] = 0xffffffff;
u32 f0 = buck_v3_1[slot1_v3_1].hash[1];
const slot* buck_v2_1 = eq->trees[0][PACKER::get_bucketid(f0, RB, SM)];
const u32 slot1_v2_1 = PACKER::get_slot1(f0, RB, SM);
const u32 slot0_v2_1 = PACKER::get_slot0(f0, slot1_v2_1, RB, SM);
susp[128 + lane] = 0xffffffff;
susp[160 + lane] = 0xffffffff;
u32 f3 = buck_v2_1[slot1_v2_1].hash[6];
const slot* buck_fin_1 = eq->round0trees[packer_default::get_bucketid(f3, 8, RB8_NSLOTS)];
const u32 slot1_fin_1 = packer_default::get_slot1(f3, 8, RB8_NSLOTS);
const u32 slot0_fin_1 = packer_default::get_slot0(f3, slot1_fin_1, 8, RB8_NSLOTS);
susp[192 + lane] = 0xffffffff;
susp[224 + lane] = 0xffffffff;
ind[0] = buck_fin_1[slot1_fin_1].hash[7];
if (CHECK_DUP(0)) continue;
ind[1] = buck_fin_1[slot0_fin_1].hash[7];
if (CHECK_DUP(1)) continue;
u32 f4 = buck_v2_1[slot0_v2_1].hash[6];
const slot* buck_fin_2 = eq->round0trees[packer_default::get_bucketid(f4, 8, RB8_NSLOTS)];
const u32 slot1_fin_2 = packer_default::get_slot1(f4, 8, RB8_NSLOTS);
const u32 slot0_fin_2 = packer_default::get_slot0(f4, slot1_fin_2, 8, RB8_NSLOTS);
ind[2] = buck_fin_2[slot1_fin_2].hash[7];
if (CHECK_DUP(2)) continue;
ind[3] = buck_fin_2[slot0_fin_2].hash[7];
if (CHECK_DUP(3)) continue;
u32 f5 = buck_v3_1[slot0_v3_1].hash[1];
const slot* buck_v2_2 = eq->trees[0][PACKER::get_bucketid(f5, RB, SM)];
const u32 slot1_v2_2 = PACKER::get_slot1(f5, RB, SM);
const u32 slot0_v2_2 = PACKER::get_slot0(f5, slot1_v2_2, RB, SM);
u32 f6 = buck_v2_2[slot1_v2_2].hash[6];
const slot* buck_fin_3 = eq->round0trees[packer_default::get_bucketid(f6, 8, RB8_NSLOTS)];
const u32 slot1_fin_3 = packer_default::get_slot1(f6, 8, RB8_NSLOTS);
const u32 slot0_fin_3 = packer_default::get_slot0(f6, slot1_fin_3, 8, RB8_NSLOTS);
ind[4] = buck_fin_3[slot1_fin_3].hash[7];
if (CHECK_DUP(4)) continue;
ind[5] = buck_fin_3[slot0_fin_3].hash[7];
if (CHECK_DUP(5)) continue;
u32 f7 = buck_v2_2[slot0_v2_2].hash[6];
const slot* buck_fin_4 = eq->round0trees[packer_default::get_bucketid(f7, 8, RB8_NSLOTS)];
const u32 slot1_fin_4 = packer_default::get_slot1(f7, 8, RB8_NSLOTS);
const u32 slot0_fin_4 = packer_default::get_slot0(f7, slot1_fin_4, 8, RB8_NSLOTS);
ind[6] = buck_fin_4[slot1_fin_4].hash[7];
if (CHECK_DUP(6)) continue;
ind[7] = buck_fin_4[slot0_fin_4].hash[7];
if (CHECK_DUP(7)) continue;
u32 f8 = buck_v4[slot0_v4].hash[1];
const slottiny* buck_v3_2 = &eq->round2trees[PACKER::get_bucketid(f8, RB, SM)].treestiny[0];
const u32 slot1_v3_2 = PACKER::get_slot1(f8, RB, SM);
const u32 slot0_v3_2 = PACKER::get_slot0(f8, slot1_v3_2, RB, SM);
u32 f9 = buck_v3_2[slot1_v3_2].hash[1];
const slot* buck_v2_3 = eq->trees[0][PACKER::get_bucketid(f9, RB, SM)];
const u32 slot1_v2_3 = PACKER::get_slot1(f9, RB, SM);
const u32 slot0_v2_3 = PACKER::get_slot0(f9, slot1_v2_3, RB, SM);
u32 f10 = buck_v2_3[slot1_v2_3].hash[6];
const slot* buck_fin_5 = eq->round0trees[packer_default::get_bucketid(f10, 8, RB8_NSLOTS)];
const u32 slot1_fin_5 = packer_default::get_slot1(f10, 8, RB8_NSLOTS);
const u32 slot0_fin_5 = packer_default::get_slot0(f10, slot1_fin_5, 8, RB8_NSLOTS);
ind[8] = buck_fin_5[slot1_fin_5].hash[7];
if (CHECK_DUP(8)) continue;
ind[9] = buck_fin_5[slot0_fin_5].hash[7];
if (CHECK_DUP(9)) continue;
u32 f11 = buck_v2_3[slot0_v2_3].hash[6];
const slot* buck_fin_6 = eq->round0trees[packer_default::get_bucketid(f11, 8, RB8_NSLOTS)];
const u32 slot1_fin_6 = packer_default::get_slot1(f11, 8, RB8_NSLOTS);
const u32 slot0_fin_6 = packer_default::get_slot0(f11, slot1_fin_6, 8, RB8_NSLOTS);
ind[10] = buck_fin_6[slot1_fin_6].hash[7];
if (CHECK_DUP(10)) continue;
ind[11] = buck_fin_6[slot0_fin_6].hash[7];
if (CHECK_DUP(11)) continue;
u32 f12 = buck_v3_2[slot0_v3_2].hash[1];
const slot* buck_v2_4 = eq->trees[0][PACKER::get_bucketid(f12, RB, SM)];
const u32 slot1_v2_4 = PACKER::get_slot1(f12, RB, SM);
const u32 slot0_v2_4 = PACKER::get_slot0(f12, slot1_v2_4, RB, SM);
u32 f13 = buck_v2_4[slot1_v2_4].hash[6];
const slot* buck_fin_7 = eq->round0trees[packer_default::get_bucketid(f13, 8, RB8_NSLOTS)];
const u32 slot1_fin_7 = packer_default::get_slot1(f13, 8, RB8_NSLOTS);
const u32 slot0_fin_7 = packer_default::get_slot0(f13, slot1_fin_7, 8, RB8_NSLOTS);
ind[12] = buck_fin_7[slot1_fin_7].hash[7];
if (CHECK_DUP(12)) continue;
ind[13] = buck_fin_7[slot0_fin_7].hash[7];
if (CHECK_DUP(13)) continue;
u32 f14 = buck_v2_4[slot0_v2_4].hash[6];
const slot* buck_fin_8 = eq->round0trees[packer_default::get_bucketid(f14, 8, RB8_NSLOTS)];
const u32 slot1_fin_8 = packer_default::get_slot1(f14, 8, RB8_NSLOTS);
const u32 slot0_fin_8 = packer_default::get_slot0(f14, slot1_fin_8, 8, RB8_NSLOTS);
ind[14] = buck_fin_8[slot1_fin_8].hash[7];
if (CHECK_DUP(14)) continue;
ind[15] = buck_fin_8[slot0_fin_8].hash[7];
if (CHECK_DUP(15)) continue;
u32 soli;
if (lane == 0) {
soli = atomicAdd(&eq->edata.srealcont.nsols, 1);
}
#if __CUDA_ARCH__ >= 300
// all threads get the value from lane 0
soli = __shfl2(soli, 0);
#else
__syncthreads();
soli = eq->edata.srealcont.nsols;
#endif
if (soli < MAXREALSOLS)
{
u32 pos = lane << 4;
*(uint4*)(&eq->edata.srealcont.sols[soli][pos ]) = *(uint4*)(&ind[ 0]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 4]) = *(uint4*)(&ind[ 4]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 8]) = *(uint4*)(&ind[ 8]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 12]) = *(uint4*)(&ind[12]);
}
}
}
//std::mutex dev_init;
int dev_init_done[MAX_GPUS] = { 0 };
__host__
static int compu32(const void *pa, const void *pb)
{
uint32_t a = *(uint32_t *)pa, b = *(uint32_t *)pb;
return a<b ? -1 : a == b ? 0 : +1;
}
__host__
static bool duped(uint32_t* prf)
{
uint32_t sortprf[512];
memcpy(sortprf, prf, sizeof(uint32_t) * 512);
qsort(sortprf, 512, sizeof(uint32_t), &compu32);
for (uint32_t i = 1; i<512; i++) {
if (sortprf[i] <= sortprf[i - 1])
return true;
}
return false;
}
__host__
static void sort_pair(uint32_t *a, uint32_t len)
{
uint32_t *b = a + len;
uint32_t tmp, need_sorting = 0;
for (uint32_t i = 0; i < len; i++) {
if (need_sorting || a[i] > b[i])
{
need_sorting = 1;
tmp = a[i];
a[i] = b[i];
b[i] = tmp;
}
else if (a[i] < b[i])
return;
}
}
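// Illustrative helper (assumption; mirrors the loop in solve() of the original sources):
// canonicalizes a full 512-index solution by sorting sibling index groups pairwise at
// every tree level, smallest-first, so that equivalent solutions compare equal.
__host__ static inline void canonicalize_solution_example(uint32_t* sol /* 512 indices */)
{
	for (uint32_t level = 0; level < 9; level++)
		for (uint32_t i = 0; i < (1 << 9); i += (2 << level))
			sort_pair(&sol[i], 1 << level);
}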
__host__
static void setheader(blake2b_state *ctx, const char *header, const u32 headerLen, const char* nce, const u32 nonceLen)
{
uint32_t le_N = WN;
uint32_t le_K = WK;
uchar personal[] = "ZcashPoW01230123";
memcpy(personal + 8, &le_N, 4);
memcpy(personal + 12, &le_K, 4);
blake2b_param P[1];
P->digest_length = HASHOUT;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
P->node_offset = 0;
P->node_depth = 0;
P->inner_length = 0;
memset(P->reserved, 0, sizeof(P->reserved));
memset(P->salt, 0, sizeof(P->salt));
memcpy(P->personal, (const uint8_t *)personal, 16);
eq_blake2b_init_param(ctx, P);
eq_blake2b_update(ctx, (const uchar *)header, headerLen);
if (nonceLen) eq_blake2b_update(ctx, (const uchar *)nce, nonceLen);
}
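// With the stock Equihash parameters (assuming WN = 200 and WK = 9), the
// personalization built above is "ZcashPoW" followed by the two little-endian
// 32-bit values, i.e. the 16 bytes
// 5a 63 61 73 68 50 6f 57 c8 00 00 00 09 00 00 00.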
#ifdef WIN32
typedef CUresult(CUDAAPI *dec_cuDeviceGet)(CUdevice*, int);
typedef CUresult(CUDAAPI *dec_cuCtxCreate)(CUcontext*, unsigned int, CUdevice);
typedef CUresult(CUDAAPI *dec_cuCtxPushCurrent)(CUcontext);
typedef CUresult(CUDAAPI *dec_cuCtxDestroy)(CUcontext);
dec_cuDeviceGet _cuDeviceGet = nullptr;
dec_cuCtxCreate _cuCtxCreate = nullptr;
dec_cuCtxPushCurrent _cuCtxPushCurrent = nullptr;
dec_cuCtxDestroy _cuCtxDestroy = nullptr;
#endif
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::eq_cuda_context(int thr_id, int dev_id, fn_validate validate, fn_cancel cancel)
{
m_fnValidate = validate;
m_fnCancel = cancel;
thread_id = thr_id;
device_id = dev_id;
solutions = nullptr;
equi_mem_sz = sizeof(equi<RB, SM>);
throughput = NBLOCKS;
totalblocks = NBLOCKS/FD_THREADS;
threadsperblock = FD_THREADS;
threadsperblock_digits = THREADS;
//dev_init.lock();
if (!dev_init_done[device_id])
{
// only first thread shall init device
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
pctx = nullptr;
}
else
{
// create new context
CUdevice dev;
#ifdef WIN32
if (_cuDeviceGet == nullptr)
{
HMODULE hmod = LoadLibraryA("nvcuda.dll");
if (hmod == NULL)
throw std::runtime_error("Failed to load nvcuda.dll");
_cuDeviceGet = (dec_cuDeviceGet)GetProcAddress(hmod, "cuDeviceGet");
if (_cuDeviceGet == nullptr)
throw std::runtime_error("Failed to get cuDeviceGet address");
_cuCtxCreate = (dec_cuCtxCreate)GetProcAddress(hmod, "cuCtxCreate_v2");
if (_cuCtxCreate == nullptr)
throw std::runtime_error("Failed to get cuCtxCreate address");
_cuCtxPushCurrent = (dec_cuCtxPushCurrent)GetProcAddress(hmod, "cuCtxPushCurrent_v2");
if (_cuCtxPushCurrent == nullptr)
throw std::runtime_error("Failed to get cuCtxPushCurrent address");
_cuCtxDestroy = (dec_cuCtxDestroy)GetProcAddress(hmod, "cuCtxDestroy_v2");
if (_cuCtxDestroy == nullptr)
throw std::runtime_error("Failed to get cuCtxDestroy address");
}
checkCudaDriverErrors(_cuDeviceGet(&dev, device_id));
checkCudaDriverErrors(_cuCtxCreate(&pctx, CU_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(_cuCtxPushCurrent(pctx));
#else
checkCudaDriverErrors(cuDeviceGet(&dev, device_id));
checkCudaDriverErrors(cuCtxCreate(&pctx, CU_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(cuCtxPushCurrent(pctx));
#endif
}
++dev_init_done[device_id];
//dev_init.unlock();
if (cudaMalloc((void**)&device_eq, equi_mem_sz) != cudaSuccess)
throw std::runtime_error("CUDA: failed to alloc memory");
solutions = (scontainerreal*) malloc(sizeof(scontainerreal));
if (!solutions)
throw std::runtime_error("EOM: failed to alloc solutions memory");
}
std::vector<unsigned char> GetMinimalFromIndices(std::vector<uint32_t> indices,
size_t cBitLen);
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ bool eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::solve(
unsigned char *pblock,
unsigned char *header,
unsigned int headerlen)
{
blake2b_state blake_ctx;
int blocks = NBUCKETS;
setheader(&blake_ctx, (const char *)header, 140-32, (const char *)header+140-32, 32);
// todo: improve
	// djezo solver allows the last 4 bytes of the nonce to be iterated
	// this can be used to create an internal loop - calc the initial blake hash only once, then load 8*8 bytes on device (blake state h)
	// then just iterate nn++
// less CPU load, 1 cudaMemcpy less -> faster
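	// A minimal sketch of that idea (hypothetical, not wired up here): keep the
	// blake2b state on the device once and loop over the low nonce word,
	// re-running the digit_* pipeline per iteration:
	//   checkCudaErrors(cudaMemcpy(&device_eq->blake_h, &blake_ctx.h, sizeof(u64) * 8, cudaMemcpyHostToDevice));
	//   for (u32 nn = 0; nn < tries && !m_fnCancel(); nn++) {   // 'tries' is a hypothetical bound
	//       checkCudaErrors(cudaMemset(&device_eq->edata, 0, sizeof(device_eq->edata)));
	//       digit_first<RB, SM, PACKER> <<<NBLOCKS / FD_THREADS, FD_THREADS >>>(device_eq, nn);
	//       // ... digit_1 .. digit_last_wdc and the solution copy exactly as below ...
	//   }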
//u32 nn = *(u32*)&nonce[28];
u32 nn = 0;
checkCudaErrors(cudaMemcpy(&device_eq->blake_h, &blake_ctx.h, sizeof(u64) * 8, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(&device_eq->edata, 0, sizeof(device_eq->edata)));
digit_first<RB, SM, PACKER> <<<NBLOCKS / FD_THREADS, FD_THREADS >>>(device_eq, nn);
digit_1<RB, SM, SSM, PACKER, 4 * NRESTS, 512> <<<4096, 512 >>>(device_eq);
digit_2<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> <<<blocks, THREADS >>>(device_eq);
digit_3<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> <<<blocks, THREADS >>>(device_eq);
if (m_fnCancel()) return false;
digit_4<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> <<<blocks, THREADS >>>(device_eq);
digit_5<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> <<<blocks, THREADS >>>(device_eq);
digit_6<RB, SM, SSM - 1, PACKER, 3 * NRESTS> <<<blocks, NRESTS >>>(device_eq);
digit_7<RB, SM, SSM - 1, PACKER, 3 * NRESTS> <<<blocks, NRESTS >>>(device_eq);
digit_8<RB, SM, SSM - 1, PACKER, 3 * NRESTS> <<<blocks, NRESTS >>>(device_eq);
digit_last_wdc<RB, SM, SSM - 3, 2, PACKER, 64, 8, 4> <<<4096, 256 / 2 >>>(device_eq);
checkCudaErrors(cudaMemcpy(solutions, &device_eq->edata.srealcont, (MAXREALSOLS * (512 * 4)) + 4, cudaMemcpyDeviceToHost));
for (u32 s = 0; (s < solutions->nsols) && (s < MAXREALSOLS); s++)
{
// remove dups on CPU (dup removal on GPU is not fully exact and can pass on some invalid solutions)
if (duped(solutions->sols[s])) continue;
		// normalize the solution: sort index pairs level by level
for (uint32_t level = 0; level < 9; level++)
for (uint32_t i = 0; i < (1 << 9); i += (2 << level))
sort_pair(&solutions->sols[s][i], 1 << level);
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions->sols[s][i];
}
std::vector<unsigned char> sol_char = GetMinimalFromIndices(index_vector, DIGITBITS);
if (m_fnValidate(sol_char, pblock, thread_id))
{
// If we find a POW solution, do not try other solutions
			// because they become invalid as we created a new block in the blockchain.
return true;
}
}
return false;
}
// free device / host resources (also called from the destructor below)
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__
void eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::freemem()
{
if (solutions)
free(solutions);
if (device_eq) {
cudaFree(device_eq);
device_eq = NULL;
}
if (pctx) {
// non primary thread, destroy context
#ifdef WIN32
checkCudaDriverErrors(_cuCtxDestroy(pctx));
#else
checkCudaDriverErrors(cuCtxDestroy(pctx));
#endif
} else {
checkCudaErrors(cudaDeviceReset());
dev_init_done[device_id] = 0;
}
}
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__
eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::~eq_cuda_context()
{
freemem();
}
#ifdef CONFIG_MODE_1
template class eq_cuda_context<CONFIG_MODE_1>;
#endif
#ifdef CONFIG_MODE_2
template class eq_cuda_context<CONFIG_MODE_2>;
#endif
#ifdef CONFIG_MODE_3
template class eq_cuda_context<CONFIG_MODE_3>;
#endif
|
ce176aad2599b156e59cfb03eff8e4c05e69a03e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cross_entropy_op.h"
namespace paddle {
namespace operators {
namespace {
template <typename T>
__global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const int64_t* label, const int N,
const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
int idx = i * D + label[i];
dX[idx] = -dY[i] / X[idx];
}
}
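// A short derivation for the hard-label kernel above: with
// loss_i = -log(X[i][label_i]) we get dloss_i/dX[i][label_i] = -1/X[i][label_i],
// so chaining with the upstream gradient dY[i] yields dX = -dY[i] / X at the
// label column and 0 elsewhere (the caller zero-fills dX before launching).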
template <typename T>
__global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const T* label, const int N,
const int D) {
int ids = blockIdx.x * blockDim.x + threadIdx.x;
if (ids < N * D) {
int row_ids = ids / D;
dX[ids] = -label[ids] * dY[row_ids] / X[ids];
}
}
} // namespace
template <typename T>
class CrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
ctx.template device_context<platform::CUDADeviceContext>(), y, x, label,
ctx.Attr<bool>("soft_label"));
}
};
template <typename T>
class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
const T* dy_data =
ctx.Input<Tensor>(framework::GradVarName("Y"))->data<T>();
T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const T* x_data = x->data<T>();
int64_t batch_size = x->dims()[0];
int64_t class_num = x->dims()[1];
int block = 512;
int grid = (batch_size * class_num + block - 1) / block;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto stream = dev_ctx.stream();
if (ctx.Attr<bool>("soft_label")) {
auto* label_data = label->data<T>();
hipLaunchKernelGGL(( SoftCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream,
dx_data, dy_data, x_data, label_data, batch_size, class_num);
} else {
math::SetConstant<platform::CUDADeviceContext, T> functor;
functor(dev_ctx, dx, 0);
auto* label_data = label->data<int64_t>();
grid = (batch_size + block - 1) / block;
hipLaunchKernelGGL(( CrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream,
dx_data, dy_data, x_data, label_data, batch_size, class_num);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel<float>,
ops::CrossEntropyOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cross_entropy_grad,
ops::CrossEntropyGradientOpCUDAKernel<float>,
ops::CrossEntropyGradientOpCUDAKernel<double>);
| ce176aad2599b156e59cfb03eff8e4c05e69a03e.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cross_entropy_op.h"
namespace paddle {
namespace operators {
namespace {
template <typename T>
__global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const int64_t* label, const int N,
const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
int idx = i * D + label[i];
dX[idx] = -dY[i] / X[idx];
}
}
template <typename T>
__global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const T* label, const int N,
const int D) {
int ids = blockIdx.x * blockDim.x + threadIdx.x;
if (ids < N * D) {
int row_ids = ids / D;
dX[ids] = -label[ids] * dY[row_ids] / X[ids];
}
}
} // namespace
template <typename T>
class CrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()(
ctx.template device_context<platform::CUDADeviceContext>(), y, x, label,
ctx.Attr<bool>("soft_label"));
}
};
template <typename T>
class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
const T* dy_data =
ctx.Input<Tensor>(framework::GradVarName("Y"))->data<T>();
T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const T* x_data = x->data<T>();
int64_t batch_size = x->dims()[0];
int64_t class_num = x->dims()[1];
int block = 512;
int grid = (batch_size * class_num + block - 1) / block;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto stream = dev_ctx.stream();
if (ctx.Attr<bool>("soft_label")) {
auto* label_data = label->data<T>();
SoftCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>(
dx_data, dy_data, x_data, label_data, batch_size, class_num);
} else {
math::SetConstant<platform::CUDADeviceContext, T> functor;
functor(dev_ctx, dx, 0);
auto* label_data = label->data<int64_t>();
grid = (batch_size + block - 1) / block;
CrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>(
dx_data, dy_data, x_data, label_data, batch_size, class_num);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel<float>,
ops::CrossEntropyOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cross_entropy_grad,
ops::CrossEntropyGradientOpCUDAKernel<float>,
ops::CrossEntropyGradientOpCUDAKernel<double>);
|
90bcc2c97efcb273e89ca0b064b1352ad0c212a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
//#define TEXTURE
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_1(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
const magmaDoubleComplex * __restrict__ dval,
const magma_index_t * __restrict__ dcolind,
const magma_index_t * __restrict__ drowptr,
const magmaDoubleComplex *__restrict__ dx,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ dy)
{
// threads assigned to rows
//int Idx = blockDim.x * blockIdx.x + threadIdx.x;
//int offset = drowptr[ blockIdx.x ];
//int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
// T threads assigned to each row
int idx = threadIdx.x; // local row
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < num_rows ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
magmaDoubleComplex val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * dx [ col ];
}
if (betazero) {
dy[ row ] = dot * alpha;
} else {
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
}
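// Indexing example for the SELLP layout used above (hypothetical sizes): with
// blocksize = 8, global row 19 belongs to slice 19/8 = 2 with local row
// 19%8 = 3, so its n-th stored entry is dval[ drowptr[2] + 8*n + 3 ] and the
// matching column index is dcolind[ drowptr[2] + 8*n + 3 ].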
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ double
read_from_tex( hipTextureObject_t texdx, const int& i) {
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2double(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
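    A minimal calling sketch (hypothetical sizes; assumes the matrix is
    already stored in SELLP with matching blocksize/slices/alignment):
    magma_zgesellpmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
    alpha, dval, dcolind, drowptr, dx, beta, dy, queue );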
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = min( int( sqrt( double( slices ))), 65535 );
int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 );
int num_tx = blocksize;
int Ms = num_threads * sizeof( magmaDoubleComplex );
// special case: alignment 1:
if( alignment == 1 ){
Ms = 0;
num_tx = 256;
int num_blocks = magma_ceildiv( n, 256 );
dimgrid1 = num_blocks; //min( int( sqrt( double( num_blocks ))), 65535 );
dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 );
dimgrid3 = 1;
//blocksize = 256;
}
dim3 block( num_tx, alignment, 1);
if( dimgrid3 > 65535 ){
printf("error: too many GPU thread blocks requested.\n");
}
dim3 grid( dimgrid1, dimgrid2, 1);
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc =
hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(double);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if ( alignment == 1) {
if (beta == MAGMA_Z_ZERO) {
            hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
} else if ( alignment == 4){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else {
        printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
hipDestroyTextureObject(texdx);
#else
if ( alignment == 1) {
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 4){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_Z_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
| 90bcc2c97efcb273e89ca0b064b1352ad0c212a8.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define PRECISION_z
//#define TEXTURE
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_1(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
const magmaDoubleComplex * __restrict__ dval,
const magma_index_t * __restrict__ dcolind,
const magma_index_t * __restrict__ drowptr,
const magmaDoubleComplex *__restrict__ dx,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ dy)
{
// threads assigned to rows
//int Idx = blockDim.x * blockIdx.x + threadIdx.x;
//int offset = drowptr[ blockIdx.x ];
//int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
// T threads assigned to each row
int idx = threadIdx.x; // local row
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < num_rows ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
magmaDoubleComplex val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * dx [ col ];
}
if (betazero) {
dy[ row ] = dot * alpha;
} else {
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ double
read_from_tex( cudaTextureObject_t texdx, const int& i) {
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2double(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaDoubleComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaDoubleComplex shared[];
if(row < num_rows ) {
magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaDoubleComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = min( int( sqrt( double( slices ))), 65535 );
int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 );
int num_tx = blocksize;
int Ms = num_threads * sizeof( magmaDoubleComplex );
// special case: alignment 1:
if( alignment == 1 ){
Ms = 0;
num_tx = 256;
int num_blocks = magma_ceildiv( n, 256 );
dimgrid1 = num_blocks; //min( int( sqrt( double( num_blocks ))), 65535 );
dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 );
dimgrid3 = 1;
//blocksize = 256;
}
dim3 block( num_tx, alignment, 1);
if( dimgrid3 > 65535 ){
printf("error: too many GPU thread blocks requested.\n");
}
dim3 grid( dimgrid1, dimgrid2, 1);
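    // Launch-geometry sketch (illustrative): for the alignment > 1 kernels each thread
    // block covers one SELLP slice; threadIdx.x walks the blocksize rows of the slice
    // and threadIdx.y the `alignment` partial products per row. For example,
    // blocksize = 8 and alignment = 4 gives 32 threads per block and
    // Ms = 32 * sizeof(magmaDoubleComplex) = 512 bytes of shared memory per block.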
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc =
cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(double);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( alignment == 1) {
if (beta == MAGMA_Z_ZERO) {
            zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
} else if ( alignment == 4){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_4_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_4_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_8_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_8_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_16_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_16_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_32_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_32_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
cudaDestroyTextureObject(texdx);
#else
if ( alignment == 1) {
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 4){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_4<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_4<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_8<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_8<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_16<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_16<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_Z_ZERO) {
zgesellptmv2d_kernel_32<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_32<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
|
46c38b3cc511dcf8d4d43f7ffd4346bf528c6244.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Chain.h>
#include <Config.h>
#include <constants.h>
#include <functions.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/reduce.h>
__global__ void samplePiAlp_kernel1(Chain *a){ /* kernel <<<G, 1>>> */
int g = IDX;
if(g < a->G){
if(pow((float) a->alp[g], 2) > 1e-6){
a->tmp1[g] = 1;
} else {
a->tmp1[g] = 0;
}
}
}
__global__ void samplePiAlp_kernel2(Chain *a){ /* kernel <<<1, 1>>> */
a->piAlp = rbetaDevice(a, 1, a->G + a->s1 + a->aTau, a->s1 + a->bTau);
}
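/* One Gibbs update for piAlp, split across the two kernels above: kernel1 stores a
   per-gene indicator of an effectively nonzero alp[g] in tmp1, the host reduces tmp1
   with thrust to obtain the count s1, and kernel2 then draws the new piAlp from a
   beta distribution parameterized with that count. */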
__host__ void samplePiAlp(Chain *host_a, Chain *dev_a, Config *cfg){ /* host */
float myTime;
hipEvent_t start, stop;
if(cfg->constPiAlp)
return;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
if(cfg->verbose)
printf("piAlp ");
if(!cfg->alpPrior){
hipLaunchKernelGGL(( samplePiAlp_kernel1), dim3(G_GRID), dim3(G_BLOCK), 0, 0, dev_a);
thrust::device_ptr<num_t> tmp1(host_a->tmp1);
num_t s1 = thrust::reduce(tmp1, tmp1 + cfg->G);
CUDA_CALL(hipMemcpy(&(dev_a->s1), &s1, sizeof(num_t), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( samplePiAlp_kernel2), dim3(1), dim3(1), 0, 0, dev_a);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&myTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cfg->timePiAlp = myTime / MILLISECS;
}
| 46c38b3cc511dcf8d4d43f7ffd4346bf528c6244.cu | #include <Chain.h>
#include <Config.h>
#include <constants.h>
#include <functions.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/reduce.h>
__global__ void samplePiAlp_kernel1(Chain *a){ /* kernel <<<G, 1>>> */
int g = IDX;
if(g < a->G){
if(pow((float) a->alp[g], 2) > 1e-6){
a->tmp1[g] = 1;
} else {
a->tmp1[g] = 0;
}
}
}
__global__ void samplePiAlp_kernel2(Chain *a){ /* kernel <<<1, 1>>> */
a->piAlp = rbetaDevice(a, 1, a->G + a->s1 + a->aTau, a->s1 + a->bTau);
}
__host__ void samplePiAlp(Chain *host_a, Chain *dev_a, Config *cfg){ /* host */
float myTime;
cudaEvent_t start, stop;
if(cfg->constPiAlp)
return;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
if(cfg->verbose)
printf("piAlp ");
if(!cfg->alpPrior){
samplePiAlp_kernel1<<<G_GRID, G_BLOCK>>>(dev_a);
thrust::device_ptr<num_t> tmp1(host_a->tmp1);
num_t s1 = thrust::reduce(tmp1, tmp1 + cfg->G);
CUDA_CALL(cudaMemcpy(&(dev_a->s1), &s1, sizeof(num_t), cudaMemcpyHostToDevice));
samplePiAlp_kernel2<<<1, 1>>>(dev_a);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&myTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cfg->timePiAlp = myTime / MILLISECS;
}
|
6fdaa0c2f9a7bfc43351a543ed14ab59f284be47.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
int main()
{
const unsigned int N = 1048576;
const unsigned int bytes = N * sizeof(int);
int *h_a = (int*)malloc(bytes);
int *d_a;
hipMalloc((int**)&d_a, bytes);
memset(h_a, 0, bytes);
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(h_a, d_a, bytes, hipMemcpyDeviceToHost);
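  // Note: h_a is ordinary pageable host memory, so the runtime stages each copy
  // through an internal pinned buffer; allocating h_a with hipHostMalloc (pinned
  // memory) would typically make these transfers faster. Error checking and the
  // matching free/hipFree calls are omitted in this minimal sample.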
return 0;
} | 6fdaa0c2f9a7bfc43351a543ed14ab59f284be47.cu | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
int main()
{
const unsigned int N = 1048576;
const unsigned int bytes = N * sizeof(int);
int *h_a = (int*)malloc(bytes);
int *d_a;
cudaMalloc((int**)&d_a, bytes);
memset(h_a, 0, bytes);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);
return 0;
} |
c4e2780203af0744ed8c5fe7f55620a133650502.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file generate_proposal_retina.cu
 * \brief GenProposalRetina Operator
* \author Shaoqing Ren, Jian Guo, Pengfei Chen, Yuntao Chen, Yanghao Li, Chenxia Han
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "../tensor/sort_op.h"
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iterator>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./generate_proposal_retina-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
namespace {
// scores are (b, anchor, h, w)
// anchors are (h * w * anchor, 4)
// proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int num_class,
const int height,
const int width,
const Dtype* scores,
const Dtype* anchors,
Dtype* proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
int anchor_index = (h * width + w) * (num_anchors / num_class) + a / num_class;
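    // Here `a` enumerates anchor*class pairs for this location: a / num_class selects
    // the geometric anchor (shared across classes) and a % num_class the class, so the
    // anchors array only stores num_anchors / num_class boxes per spatial position.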
proposals[index * 5 + 0] = anchors[anchor_index * 4 + 0];
proposals[index * 5 + 1] = anchors[anchor_index * 4 + 1];
proposals[index * 5 + 2] = anchors[anchor_index * 4 + 2];
proposals[index * 5 + 3] = anchors[anchor_index * 4 + 3];
proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, anchor * 4, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int num_class,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* means,
const Dtype* stds,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a / num_class * 4) * feat_height + h) * feat_width + w] * stds[0] + means[0];
float dy = deltas[((a / num_class * 4 + 1) * feat_height + h) * feat_width + w] * stds[1] + means[1];
float dw = deltas[((a / num_class * 4 + 2) * feat_height + h) * feat_width + w] * stds[2] + means[2];
float dh = deltas[((a / num_class * 4 + 3) * feat_height + h) * feat_width + w] * stds[3] + means[3];
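    // dx/dy/dw/dh are the un-normalized regression outputs (std/mean folded back in);
    // below is the standard R-CNN box decoding: shift the anchor center by (dx, dy)
    // scaled by the anchor size, rescale width/height via exp(), then clip to the image.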
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
/*
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
*/
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
/*
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
*/
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
const float thresh,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
float score = dets[index * 5 + 4];
if (iw < min_size || ih < min_size || score <= thresh) {
dets[index * 5 + 0] = 0.0f;
dets[index * 5 + 1] = 0.0f;
dets[index * 5 + 2] = 0.0f;
dets[index * 5 + 3] = 0.0f;
dets[index * 5 + 4] = 0.0f;
}
}
}
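// Filtering works by zeroing rather than compacting: the host later sorts proposals by
// score in descending order, so zeroed entries sink to the bottom of the ranking before
// the top rpn_pre_nms_top_n proposals are kept.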
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const int height,
const int width,
const int num_class,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 6 + j] = prev_dets[order_i * 5 + j];
}
dets[index * 6 + 5] = order_i % num_class;
}
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int out_size,
const int batchIdx,
const int out_channel,
Dtype* out,
Dtype* scores) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
if (index < out_size) {
for (int j = 0; j < 4; ++j) {
out[index * 4 + j] = dets[index * 6 + j];
}
int cid = min(out_channel - 1, static_cast<int>(dets[index * 6 + 5]) + 1);
scores[index * out_channel + cid] = dets[index * 6 + 4];
}
}
}
} // namespace
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class GenProposalRetinaGPUOp : public Operator{
public:
explicit GenProposalRetinaGPUOp(GenProposalRetinaParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 4);
CHECK_EQ(out_data.size(), 2);
CHECK_EQ(req.size(), 2);
// CHECK_EQ(req[proposal::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> scores = in_data[gen_proposal_retina::kClsProb].get<xpu, 4, float>(s); // batch_idx, anchor_idx, height_idx, width_idx
Tensor<xpu, 4> bbox_deltas = in_data[gen_proposal_retina::kBBoxPred].get<xpu, 4, float>(s); // batch_idx, height_idx, width_idx, anchor_idx
Tensor<xpu, 2> im_info = in_data[gen_proposal_retina::kImInfo].get<xpu, 2, float>(s); // batch_idx, 3(height, width, scale)
Tensor<xpu, 1> anchors = in_data[gen_proposal_retina::kAnchor].FlatTo1D<xpu, float>(s); // height * width * anchor, 4
Tensor<xpu, 3> out = out_data[gen_proposal_retina::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 5(batch_idx, x1, y1, x2, y2), batch_idx is needed after flatten
Tensor<xpu, 3> out_scores = out_data[gen_proposal_retina::kScore].get<xpu, 3, float>(s);
uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient
Tensor<xpu, 1, uint8_t> workspace = ctx.requested[gen_proposal_retina::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s);
uint64_t allocated_bytes = 0ULL;
uint64_t allocated_bytes_outside_loop = 0ULL;
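    // The workspace behaves like a bump allocator: every temporary advances
    // allocated_bytes, and after each image in the batch the offset is rewound to
    // allocated_bytes_outside_loop so the per-image scratch buffers are reused.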
int nbatch = scores.size(0);
int num_anchors = scores.size(1);
int num_class = num_anchors / param_.num_anchors;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count);
// Copy generated anchors to GPU
Tensor<xpu, 3> proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape3(nbatch, count, 5));
allocated_bytes += nbatch * count * 5 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(nbatch * 3);
FRCNN_CUDA_CHECK(hipMemcpy(cpu_im_info.data(),
im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
hipMemcpyDeviceToHost)); // less than 64K
Shape<3> fg_scores_shape = Shape3(num_anchors, height, width);
// init output boxes and scores
out = 0.0f;
out_scores = 0.0f;
allocated_bytes_outside_loop = allocated_bytes;
bool batch_wise_anchor = param_.batch_wise_anchor;
int anchors_offset;
/* copy anchors for all images in batch */
for (int i = 0; i < nbatch; i++) {
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[i*3 + 0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[i*3 + 1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
float* batch_proposals = proposals.dptr_ + i * 5 * count;
/* get current batch foreground score */
float* foreground_score_ptr = reinterpret_cast<float *>(scores.dptr_) + i * count;
Tensor<xpu, 3> fg_scores = Tensor<xpu, 3>(foreground_score_ptr, fg_scores_shape);
/* copy proposals to a mesh grid */
anchors_offset = batch_wise_anchor ? i * count * 4 : 0;
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, num_class, height, width,
fg_scores.dptr_, anchors.dptr_ + anchors_offset, batch_proposals);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* transform anchors and bbox_deltas into bboxes */
Tensor<xpu, 1> anchor_mean(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(4));
allocated_bytes += 4 * sizeof(float);
Tensor<xpu, 1> anchor_std(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(4));
allocated_bytes += 4 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
std::vector<float> cpu_anchor_mean(param_.anchor_mean.begin(), param_.anchor_mean.end());
std::vector<float> cpu_anchor_std(param_.anchor_std.begin(), param_.anchor_std.end());
hipMemcpy(anchor_mean.dptr_, cpu_anchor_mean.data(), 4 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(anchor_std.dptr_, cpu_anchor_std.data(), 4 * sizeof(float), hipMemcpyHostToDevice);
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1],
batch_proposals, bbox_deltas.dptr_ + i * 4 * count / num_class, batch_proposals);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, num_class, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1], anchor_mean.dptr_, anchor_std.dptr_,
batch_proposals, bbox_deltas.dptr_ + i * 4 * count / num_class, batch_proposals);
}
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* filter boxes with less than rpn_min_size */
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, param_.rpn_min_size * cpu_im_info[i * 3 + 2], param_.thresh, batch_proposals);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* copy score to a continuous memory */
Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(int);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, batch_proposals, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* argsort score, save order */
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<float>());
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* Reorder proposals according to order */
Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 6));
allocated_bytes += rpn_pre_nms_top_n * 6 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_pre_nms_top_n, height, width, num_class, batch_proposals, order.dptr_, ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
/* copy results to output */
dimGrid.x = (out.size(1) + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
out.size(1), ordered_proposals.dptr_, rpn_pre_nms_top_n, i,
out_scores.size(2),
out.dptr_ + i * out.size(1) * out.size(2),
out_scores.dptr_ + i * out_scores.size(1) * out_scores.size(2));
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// recycle all bytes allocated within loop
allocated_bytes = allocated_bytes_outside_loop;
}
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 4);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[gen_proposal_retina::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[gen_proposal_retina::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[gen_proposal_retina::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 1> ganchors = in_grad[gen_proposal_retina::kAnchor].FlatTo1D<xpu, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[gen_proposal_retina::kClsProb], 0);
Assign(gbbox, req[gen_proposal_retina::kBBoxPred], 0);
Assign(ginfo, req[gen_proposal_retina::kImInfo], 0);
Assign(ganchors, req[gen_proposal_retina::kAnchor], 0);
}
private:
GenProposalRetinaParam param_;
}; // class GenProposalRetinaGPUOp
template<>
Operator* CreateOp<gpu>(GenProposalRetinaParam param) {
return new GenProposalRetinaGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| c4e2780203af0744ed8c5fe7f55620a133650502.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file generate_proposal_retina.cu
 * \brief GenProposalRetina Operator
* \author Shaoqing Ren, Jian Guo, Pengfei Chen, Yuntao Chen, Yanghao Li, Chenxia Han
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "../tensor/sort_op.h"
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iterator>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./generate_proposal_retina-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
namespace {
// scores are (b, anchor, h, w)
// anchors are (h * w * anchor, 4)
// proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int num_class,
const int height,
const int width,
const Dtype* scores,
const Dtype* anchors,
Dtype* proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
int anchor_index = (h * width + w) * (num_anchors / num_class) + a / num_class;
proposals[index * 5 + 0] = anchors[anchor_index * 4 + 0];
proposals[index * 5 + 1] = anchors[anchor_index * 4 + 1];
proposals[index * 5 + 2] = anchors[anchor_index * 4 + 2];
proposals[index * 5 + 3] = anchors[anchor_index * 4 + 3];
proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, anchor * 4, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int num_class,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* means,
const Dtype* stds,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a / num_class * 4) * feat_height + h) * feat_width + w] * stds[0] + means[0];
float dy = deltas[((a / num_class * 4 + 1) * feat_height + h) * feat_width + w] * stds[1] + means[1];
float dw = deltas[((a / num_class * 4 + 2) * feat_height + h) * feat_width + w] * stds[2] + means[2];
float dh = deltas[((a / num_class * 4 + 3) * feat_height + h) * feat_width + w] * stds[3] + means[3];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
/*
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
*/
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
/*
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
*/
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
const float thresh,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
float score = dets[index * 5 + 4];
if (iw < min_size || ih < min_size || score <= thresh) {
dets[index * 5 + 0] = 0.0f;
dets[index * 5 + 1] = 0.0f;
dets[index * 5 + 2] = 0.0f;
dets[index * 5 + 3] = 0.0f;
dets[index * 5 + 4] = 0.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const int height,
const int width,
const int num_class,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 6 + j] = prev_dets[order_i * 5 + j];
}
dets[index * 6 + 5] = order_i % num_class;
}
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int out_size,
const int batchIdx,
const int out_channel,
Dtype* out,
Dtype* scores) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
if (index < out_size) {
for (int j = 0; j < 4; ++j) {
out[index * 4 + j] = dets[index * 6 + j];
}
int cid = min(out_channel - 1, static_cast<int>(dets[index * 6 + 5]) + 1);
scores[index * out_channel + cid] = dets[index * 6 + 4];
}
}
}
} // namespace
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template<typename xpu>
class GenProposalRetinaGPUOp : public Operator{
public:
explicit GenProposalRetinaGPUOp(GenProposalRetinaParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 4);
CHECK_EQ(out_data.size(), 2);
CHECK_EQ(req.size(), 2);
// CHECK_EQ(req[proposal::kOut], kWriteTo);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> scores = in_data[gen_proposal_retina::kClsProb].get<xpu, 4, float>(s); // batch_idx, anchor_idx, height_idx, width_idx
Tensor<xpu, 4> bbox_deltas = in_data[gen_proposal_retina::kBBoxPred].get<xpu, 4, float>(s); // batch_idx, height_idx, width_idx, anchor_idx
Tensor<xpu, 2> im_info = in_data[gen_proposal_retina::kImInfo].get<xpu, 2, float>(s); // batch_idx, 3(height, width, scale)
Tensor<xpu, 1> anchors = in_data[gen_proposal_retina::kAnchor].FlatTo1D<xpu, float>(s); // height * width * anchor, 4
Tensor<xpu, 3> out = out_data[gen_proposal_retina::kOut].get<xpu, 3, float>(s); // batch_idx, rois_idx, 5(batch_idx, x1, y1, x2, y2), batch_idx is needed after flatten
Tensor<xpu, 3> out_scores = out_data[gen_proposal_retina::kScore].get<xpu, 3, float>(s);
uint64_t WORKSPACE_LIMIT = 1024 * 1024 * param_.workspace; // 256 MB should be sufficient
Tensor<xpu, 1, uint8_t> workspace = ctx.requested[gen_proposal_retina::kTempSpace].get_space_typed<xpu, 1, uint8_t>(Shape1(WORKSPACE_LIMIT), s);
uint64_t allocated_bytes = 0ULL;
uint64_t allocated_bytes_outside_loop = 0ULL;
int nbatch = scores.size(0);
int num_anchors = scores.size(1);
int num_class = num_anchors / param_.num_anchors;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count);
// Copy generated anchors to GPU
Tensor<xpu, 3> proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape3(nbatch, count, 5));
allocated_bytes += nbatch * count * 5 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(nbatch * 3);
FRCNN_CUDA_CHECK(cudaMemcpy(cpu_im_info.data(),
im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
cudaMemcpyDeviceToHost)); // less than 64K
Shape<3> fg_scores_shape = Shape3(num_anchors, height, width);
// init output boxes and scores
out = 0.0f;
out_scores = 0.0f;
allocated_bytes_outside_loop = allocated_bytes;
bool batch_wise_anchor = param_.batch_wise_anchor;
int anchors_offset;
/* copy anchors for all images in batch */
for (int i = 0; i < nbatch; i++) {
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[i*3 + 0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[i*3 + 1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
float* batch_proposals = proposals.dptr_ + i * 5 * count;
/* get current batch foreground score */
float* foreground_score_ptr = reinterpret_cast<float *>(scores.dptr_) + i * count;
Tensor<xpu, 3> fg_scores = Tensor<xpu, 3>(foreground_score_ptr, fg_scores_shape);
/* copy proposals to a mesh grid */
anchors_offset = batch_wise_anchor ? i * count * 4 : 0;
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, num_class, height, width,
fg_scores.dptr_, anchors.dptr_ + anchors_offset, batch_proposals);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* transform anchors and bbox_deltas into bboxes */
Tensor<xpu, 1> anchor_mean(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(4));
allocated_bytes += 4 * sizeof(float);
Tensor<xpu, 1> anchor_std(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(4));
allocated_bytes += 4 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
std::vector<float> cpu_anchor_mean(param_.anchor_mean.begin(), param_.anchor_mean.end());
std::vector<float> cpu_anchor_std(param_.anchor_std.begin(), param_.anchor_std.end());
cudaMemcpy(anchor_mean.dptr_, cpu_anchor_mean.data(), 4 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(anchor_std.dptr_, cpu_anchor_std.data(), 4 * sizeof(float), cudaMemcpyHostToDevice);
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1],
batch_proposals, bbox_deltas.dptr_ + i * 4 * count / num_class, batch_proposals);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, num_class, height, width, real_height, real_width,
cpu_im_info[i * 3 + 0], cpu_im_info[i * 3 + 1], anchor_mean.dptr_, anchor_std.dptr_,
batch_proposals, bbox_deltas.dptr_ + i * 4 * count / num_class, batch_proposals);
}
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* filter boxes with less than rpn_min_size */
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, param_.rpn_min_size * cpu_im_info[i * 3 + 2], param_.thresh, batch_proposals);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* copy score to a continuous memory */
Tensor<xpu, 1> score(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
Tensor<xpu, 1, int> order(reinterpret_cast<int *>(workspace.dptr_ + allocated_bytes), Shape1(count));
allocated_bytes += count * sizeof(int);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel<<<dimGrid, dimBlock>>>(
count, batch_proposals, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* argsort score, save order */
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<float>());
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* Reorder proposals according to order */
Tensor<xpu, 2> ordered_proposals(reinterpret_cast<float *>(workspace.dptr_ + allocated_bytes), Shape2(rpn_pre_nms_top_n, 6));
allocated_bytes += rpn_pre_nms_top_n * 6 * sizeof(float);
CHECK_LT(allocated_bytes, WORKSPACE_LIMIT) << "Allocating more memory than workspace limit";
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel<<<dimGrid, dimBlock>>>(
rpn_pre_nms_top_n, height, width, num_class, batch_proposals, order.dptr_, ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
/* copy results to output */
dimGrid.x = (out.size(1) + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput<<<dimGrid, dimBlock>>>(
out.size(1), ordered_proposals.dptr_, rpn_pre_nms_top_n, i,
out_scores.size(2),
out.dptr_ + i * out.size(1) * out.size(2),
out_scores.dptr_ + i * out_scores.size(1) * out_scores.size(2));
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// recycle all bytes allocated within loop
allocated_bytes = allocated_bytes_outside_loop;
}
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 4);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[gen_proposal_retina::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[gen_proposal_retina::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[gen_proposal_retina::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 1> ganchors = in_grad[gen_proposal_retina::kAnchor].FlatTo1D<xpu, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[gen_proposal_retina::kClsProb], 0);
Assign(gbbox, req[gen_proposal_retina::kBBoxPred], 0);
Assign(ginfo, req[gen_proposal_retina::kImInfo], 0);
Assign(ganchors, req[gen_proposal_retina::kAnchor], 0);
}
private:
GenProposalRetinaParam param_;
}; // class GenProposalRetinaGPUOp
template<>
Operator* CreateOp<gpu>(GenProposalRetinaParam param) {
return new GenProposalRetinaGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
14087302ab627af74f39c52a519385494528ea5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/native/SortingUtils.h>
#include <assert.h>
#include <c10/macros/Macros.h>
#include <stdlib.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <THH/THHDeviceUtils.cuh> // only for THCRoundUp?
#include <THH/THHNumerics.cuh>
#include <THH/THHScanUtils.cuh>
#include <THH/THHTensorMathReduce.cuh> // AddOp
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/inner_product.h>
#include <thrust/sequence.h>
#include <THH/THHThrustAllocator.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/SortingRadixSelect.cuh>
#include <ATen/NamedTensorUtils.h>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherKthValue(
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t k,
index_t numInputSlices,
index_t inputWithinSliceStride,
cuda::detail::TensorInfo<scalar_t, index_t> kthValue,
cuda::detail::TensorInfo<int64_t, index_t> indices) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of index_t
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
index_t sliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
index_t kthValueSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
scalar_t* inputSliceStart = &input.data[sliceStartIndex];
scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
scalar_t kValue = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k,
inputSliceSize,
inputWithinSliceStride,
smem,
&kValue);
// Find the index of the k-th highest element
index_t kValueIndex = 0;
bool foundKValue = false;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride])
: static_cast<scalar_t>(0);
bool isKValue = inRange && THCNumerics<scalar_t>::eq_with_nan(v, kValue);
if (isKValue) {
kValueIndex = i;
foundKValue = true;
break;
}
}
if (foundKValue) {
kthValueSliceStart[0] = kValue;
indicesSliceStart[0] = kValueIndex;
}
}
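// One thread block handles one slice: radixSelect cooperatively narrows down the k-th
// value using the shared-memory counters in smem, then the threads scan the slice in
// strides of blockDim.x and a thread that finds an element equal to that value records
// the value and its index for the slice.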
struct KthValueLauncher {
int64_t k;
KthValueLauncher(int64_t k) : k(k) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(
::min(THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( gatherKthValue<scalar_t, index_t, all_dims>), dim3(grid), dim3(block), 0, stream,
self_info,
slice_size,
k,
num_slices,
/* The actual dimension that the k-selection is running in */
/* may have changed from collapseDims() */
self_info.strides[collapse_self_dim],
values_info,
indices_info);
}
};
template <typename scalar_t>
void kthvalue_cuda_template(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
int64_t slicesize = self.size(dim);
// FIXME: This seems bogus, I only do this because it was the old behaviour.
// The reductions are fine, as long as the axis being reduced along
// isn't of 0 elements (and the output has elements).
TORCH_CHECK(
self.numel() > 0,
"cannot perform reduction function kthvalue",
" on tensor with no elements because the operation does not have an identity");
TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range");
_reduction_with_indices_allocate_or_resize_output(
values, indices, self, dim, keepdim);
if (self.dim() == 0 && self.numel() == 1) {
values.copy_(self);
indices.zero_();
return;
}
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values, indices, self, dim, KthValueLauncher(k));
} else {
run_launcher<scalar_t, uint64_t>(
values, indices, self, dim, KthValueLauncher(k));
}
if (!keepdim) {
values.squeeze_(dim);
indices.squeeze_(dim);
}
AT_CUDA_CHECK(hipGetLastError());
}
// this does not reduce to median with dim because we don't want to copy twice
template <typename scalar_t>
Tensor median_cuda_template(const Tensor& self) {
TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor");
if (self.dim() == 0 && self.numel() == 1) {
return self.clone();
}
auto self_copy = self.clone().view(-1);
auto values = at::empty({1}, self.options());
auto indices = at::empty({1}, self.options().dtype(kLong));
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
} else {
run_launcher<scalar_t, uint64_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
}
return values.view({});
}
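// Note: k = (size + 1) / 2 with integer division picks the lower of the two middle
// elements when the length is even, i.e. this median returns the lower median rather
// than the average of the two middle values.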
} // namespace
static std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] {
kthvalue_cuda_template<scalar_t>(values, indices, self, k, dim, keepdim);
});
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor&, Tensor&> kthvalue_out_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
auto result = [&]() {
#ifdef BUILD_NAMEDTENSOR
NoNamesGuard guard;
#endif
return kthvalue_out_impl_cuda(values, indices, self, k, dim, keepdim);
}();
#ifdef BUILD_NAMEDTENSOR
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
#endif
return result;
}
Tensor median_cuda(const Tensor& self) {
return AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "median", [&] {
return median_cuda_template<scalar_t>(self);
});
}
} // namespace native
} // namespace at
| 14087302ab627af74f39c52a519385494528ea5a.cu | #include <ATen/ATen.h>
#include <ATen/native/SortingUtils.h>
#include <assert.h>
#include <c10/macros/Macros.h>
#include <stdlib.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <THC/THCDeviceUtils.cuh> // only for THCRoundUp?
#include <THC/THCNumerics.cuh>
#include <THC/THCScanUtils.cuh>
#include <THC/THCTensorMathReduce.cuh> // AddOp
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/inner_product.h>
#include <thrust/sequence.h>
#include <THC/THCThrustAllocator.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/SortingRadixSelect.cuh>
#include <ATen/NamedTensorUtils.h>
namespace at {
namespace native {
namespace {
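// gatherKthValue below uses one block per slice: radixSelect first computes
// the k-th smallest value of the slice, then every thread scans a strided
// portion of the slice for an element equal to that value and writes the
// value and one matching index to the output slices.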
template <typename scalar_t, typename index_t, int Dim>
__global__ void gatherKthValue(
cuda::detail::TensorInfo<scalar_t, index_t> input,
index_t inputSliceSize,
index_t k,
index_t numInputSlices,
index_t inputWithinSliceStride,
cuda::detail::TensorInfo<scalar_t, index_t> kthValue,
cuda::detail::TensorInfo<int64_t, index_t> indices) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of index_t
__shared__ int smem[C10_WARP_SIZE]; // one per each warp, up to warp limit
index_t slice = getLinearBlockId<index_t>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
index_t sliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, input);
index_t kthValueSliceStartIndex =
cuda::detail::IndexToOffset<scalar_t, index_t, Dim>::get(slice, kthValue);
index_t indicesSliceStartIndex =
cuda::detail::IndexToOffset<int64_t, index_t, Dim>::get(slice, indices);
scalar_t* inputSliceStart = &input.data[sliceStartIndex];
scalar_t* kthValueSliceStart = &kthValue.data[kthValueSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
scalar_t kValue = static_cast<scalar_t>(0);
radixSelect<
scalar_t,
typename TopKTypeConfig<scalar_t>::RadixType,
index_t,
false>(
inputSliceStart,
k,
inputSliceSize,
inputWithinSliceStride,
smem,
&kValue);
// Find the index of the k-th highest element
index_t kValueIndex = 0;
bool foundKValue = false;
for (index_t i = threadIdx.x; i < inputSliceSize; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
scalar_t v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride])
: static_cast<scalar_t>(0);
bool isKValue = inRange && THCNumerics<scalar_t>::eq_with_nan(v, kValue);
if (isKValue) {
kValueIndex = i;
foundKValue = true;
break;
}
}
if (foundKValue) {
kthValueSliceStart[0] = kValue;
indicesSliceStart[0] = kValueIndex;
}
}
struct KthValueLauncher {
int64_t k;
KthValueLauncher(int64_t k) : k(k) {}
template <typename scalar_t, typename index_t, int all_dims>
inline void launch(
cuda::detail::TensorInfo<scalar_t, index_t> values_info,
int collapse_values_dim,
cuda::detail::TensorInfo<int64_t, index_t> indices_info,
int collapse_indices_dim,
cuda::detail::TensorInfo<scalar_t, index_t> self_info,
int collapse_self_dim,
int64_t num_slices,
int64_t slice_size) {
dim3 grid;
if (!getGridFromTiles(num_slices, grid)) {
AT_ERROR("slices are too many");
}
dim3 block(
std::min(THCRoundUp(slice_size, (int64_t)C10_WARP_SIZE), (int64_t)1024));
auto stream = at::cuda::getCurrentCUDAStream();
gatherKthValue<scalar_t, index_t, all_dims><<<grid, block, 0, stream>>>(
self_info,
slice_size,
k,
num_slices,
/* The actual dimension that the k-selection is running in */
/* may have changed from collapseDims() */
self_info.strides[collapse_self_dim],
values_info,
indices_info);
}
};
template <typename scalar_t>
void kthvalue_cuda_template(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
int64_t slicesize = self.size(dim);
// FIXME: This seems bogus, I only do this because it was the old behaviour.
// The reductions are fine, as long as the axis being reduced along
// isn't of 0 elements (and the output has elements).
TORCH_CHECK(
self.numel() > 0,
"cannot perform reduction function kthvalue",
" on tensor with no elements because the operation does not have an identity");
TORCH_CHECK(k >= 1 && k <= slicesize, "selected number k out of range");
_reduction_with_indices_allocate_or_resize_output(
values, indices, self, dim, keepdim);
if (self.dim() == 0 && self.numel() == 1) {
values.copy_(self);
indices.zero_();
return;
}
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values, indices, self, dim, KthValueLauncher(k));
} else {
run_launcher<scalar_t, uint64_t>(
values, indices, self, dim, KthValueLauncher(k));
}
if (!keepdim) {
values.squeeze_(dim);
indices.squeeze_(dim);
}
AT_CUDA_CHECK(cudaGetLastError());
}
// this does not reduce to median with dim because we don't want to copy twice
template <typename scalar_t>
Tensor median_cuda_template(const Tensor& self) {
TORCH_CHECK(self.numel() > 0, "median cannot be called with empty tensor");
if (self.dim() == 0 && self.numel() == 1) {
return self.clone();
}
auto self_copy = self.clone().view(-1);
auto values = at::empty({1}, self.options());
auto indices = at::empty({1}, self.options().dtype(kLong));
TORCH_CHECK(
self.dim() <= MAX_TENSORINFO_DIMS,
"cannot operate on more than ",
MAX_TENSORINFO_DIMS,
" dimensions");
// Based on required index size, run the algorithm with the
// appropriate index type
if (cuda::detail::canUse32BitIndexMath(self) &&
cuda::detail::canUse32BitIndexMath(values) &&
cuda::detail::canUse32BitIndexMath(indices)) {
run_launcher<scalar_t, uint32_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
} else {
run_launcher<scalar_t, uint64_t>(
values,
indices,
self_copy,
0,
KthValueLauncher((self_copy.size(0) + 1) / 2)); // KthValue is 1-based
}
return values.view({});
}
} // namespace
static std::tuple<Tensor&, Tensor&> kthvalue_out_impl_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "kthvalue_cuda", [&] {
kthvalue_cuda_template<scalar_t>(values, indices, self, k, dim, keepdim);
});
return std::forward_as_tuple(values, indices);
}
std::tuple<Tensor&, Tensor&> kthvalue_out_cuda(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t k,
int64_t dim,
bool keepdim) {
auto result = [&]() {
#ifdef BUILD_NAMEDTENSOR
NoNamesGuard guard;
#endif
return kthvalue_out_impl_cuda(values, indices, self, k, dim, keepdim);
}();
#ifdef BUILD_NAMEDTENSOR
namedinference::propagate_names_for_reduction(values, self, dim, keepdim);
namedinference::propagate_names_for_reduction(indices, self, dim, keepdim);
#endif
return result;
}
Tensor median_cuda(const Tensor& self) {
return AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "median", [&] {
return median_cuda_template<scalar_t>(self);
});
}
} // namespace native
} // namespace at
|
75e66e6addb6d73872ad3d11356c98f58a105bfc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/unary/log.h"
#include <hip/hip_fp16.h>
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
template <typename T>
__device__ __inline__ T ppl_scalar_log(const T &in_val)
{
return log(in_val);
}
template <>
__device__ __inline__ half ppl_scalar_log<half>(const half &in_val)
{
return __float2half(log(__half2float(in_val)));
}
#endif
template <typename T>
__global__ void ppl_cukernel_log_ndarray(
const uint64_t num_elems,
const T* input,
T* output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = ppl_scalar_log<T>(input[index]);
#endif
}
template <typename T>
__global__ void ppl_cukernel_log_nhwc(
const uint64_t num_elems,
int channels,
int pad_channels,
int chw,
int hw,
const T* input,
T* output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
int chw_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (chw_idx >= chw)
return;
int c_idx = chw_idx % channels;
int hw_idx = chw_idx / channels;
int b_idx = blockIdx.z;
int64_t index = (b_idx * hw + hw_idx) * pad_channels + c_idx;
output[index] = ppl_scalar_log<T>(input[index]);
#endif
}
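// In the NHWC kernel above each thread handles one (channel, spatial) pair of
// one batch entry: blockIdx.z walks the batch and pad_channels is the real
// per-pixel stride of the padded NHWC layout.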
ppl::common::RetCode PPLCUDALogForwardImp(
hipStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int batch = output_shape->GetDim(0);
int channels = output_shape->GetDim(1);
int pad_channels = output_shape->GetDim(1) + output_shape->GetPadding1(1);
int height = output_shape->GetDim(2);
int width = output_shape->GetDim(3);
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
int block_size = 256;
int grid_size = (num_elems + block_size - 1) / block_size;
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( ppl_cukernel_log_ndarray<float>), dim3(grid_size), dim3(block_size), 0, stream, num_elems,
(const float *)input,
(float *)output);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( ppl_cukernel_log_ndarray<half>), dim3(grid_size), dim3(block_size), 0, stream, num_elems,
(const half *)input,
(half *)output);
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) {
int block_size = 256;
dim3 grid_size;
int chw = channels * height * width;
grid_size.x = (chw + block_size - 1) / block_size;
grid_size.z = batch;
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( ppl_cukernel_log_nhwc<float>), dim3(grid_size), dim3(block_size), 0, stream,
num_elems, channels, pad_channels, channels * height * width, height * width, (const float *)input, (float *)output);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( ppl_cukernel_log_nhwc<half>), dim3(grid_size), dim3(block_size), 0, stream,
num_elems, channels, pad_channels, channels * height * width, height * width, (const half *)input, (half *)output);
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
} | 75e66e6addb6d73872ad3d11356c98f58a105bfc.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/unary/log.h"
#include <cuda_fp16.h>
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
template <typename T>
__device__ __inline__ T ppl_scalar_log(const T &in_val)
{
return log(in_val);
}
template <>
__device__ __inline__ half ppl_scalar_log<half>(const half &in_val)
{
return __float2half(log(__half2float(in_val)));
}
#endif
template <typename T>
__global__ void ppl_cukernel_log_ndarray(
const uint64_t num_elems,
const T* input,
T* output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
output[index] = ppl_scalar_log<T>(input[index]);
#endif
}
template <typename T>
__global__ void ppl_cukernel_log_nhwc(
const uint64_t num_elems,
int channels,
int pad_channels,
int chw,
int hw,
const T* input,
T* output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
int chw_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (chw_idx >= chw)
return;
int c_idx = chw_idx % channels;
int hw_idx = chw_idx / channels;
int b_idx = blockIdx.z;
int64_t index = (b_idx * hw + hw_idx) * pad_channels + c_idx;
output[index] = ppl_scalar_log<T>(input[index]);
#endif
}
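// Note: both kernels above are guarded by __CUDA_ARCH__ >= 600, so builds
// that only target older architectures compile them with empty bodies and the
// launches silently become no-ops.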
ppl::common::RetCode PPLCUDALogForwardImp(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int batch = output_shape->GetDim(0);
int channels = output_shape->GetDim(1);
int pad_channels = output_shape->GetDim(1) + output_shape->GetPadding1(1);
int height = output_shape->GetDim(2);
int width = output_shape->GetDim(3);
if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) {
int block_size = 256;
int grid_size = (num_elems + block_size - 1) / block_size;
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
ppl_cukernel_log_ndarray<float><<<grid_size, block_size, 0, stream>>>(num_elems,
(const float *)input,
(float *)output);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
ppl_cukernel_log_ndarray<half><<<grid_size, block_size, 0, stream>>>(num_elems,
(const half *)input,
(half *)output);
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC) {
int block_size = 256;
dim3 grid_size;
int chw = channels * height * width;
grid_size.x = (chw + block_size - 1) / block_size;
grid_size.z = batch;
if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
ppl_cukernel_log_nhwc<float><<<grid_size, block_size, 0, stream>>>(
num_elems, channels, pad_channels, channels * height * width, height * width, (const float *)input, (float *)output);
} else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
ppl_cukernel_log_nhwc<half><<<grid_size, block_size, 0, stream>>>(
num_elems, channels, pad_channels, channels * height * width, height * width, (const half *)input, (half *)output);
} else {
return ppl::common::RC_UNSUPPORTED;
}
} else {
return ppl::common::RC_UNSUPPORTED;
}
return ppl::common::RC_SUCCESS;
} |
93b4e5125985bd84e8f9ec7894ebd457f72fca98.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
using namespace std;
//This implementation is a binary-tree reduction, meaning that the input is buffered (padded) out to a power of 2 before reducing.
//This version only works for 2048 values and under so that everything fits in one block; a multi-block version for larger inputs is left as a follow-up.
#define N 2047//number of input values
#define R 1024//reduction factor
#define F N/R//how many values will be in the final output
//reduce uses every thread to average two values, then half of those threads to average the partial results, and so on until only sizeOut values remain.
//sizeIn and sizeOut must both be powers of 2
__global__ void reduce(double *a,double *z,int sizeIn, int sizeOut){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
if(tid >=sizeIn /2) return;
  __shared__ double subTotals[1024];//sized for the largest block this kernel supports (bufferedA/2 threads); N/2 truncates for odd N and would be one entry short
  subTotals[tid]=(a[tid*2]+a[tid*2+1])/2;//average every two values using all threads
__syncthreads();
int level=2;
  while ((sizeIn/level) > sizeOut){//keep halving until only sizeOut partial results remain
if(tid % level==0){//use half threads every iteration
subTotals[tid]=(subTotals[tid]+subTotals[tid+(level/2)])/2;
}
__syncthreads();//we have to sync threads every time here :(
level = level * 2;
}
level = level /2;
if(tid % level==0){
z[tid/level] = subTotals[tid];
}
}
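// Example: with sizeIn = 8 and sizeOut = 2, four threads first form pairwise
// averages s[i] = (a[2i] + a[2i+1]) / 2, the loop then averages s[0] with s[1]
// and s[2] with s[3], and the kernel writes z[0] = mean(a[0..3]) and
// z[1] = mean(a[4..7]); in general each output is the mean of sizeIn/sizeOut
// consecutive (buffered) input values.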
int main(){
int bufferedA = 1, bufferedZ =1;//size of arrays after buffering
while(N > bufferedA){//get closest power of 2 greater than A
bufferedA = bufferedA * 2;
}
int bufferSizeA = bufferedA - N;//amount that needs to be buffered
while(F > bufferedZ){//get closest power of 2 greater than Z
bufferedZ = bufferedZ * 2;
}
double *a,*z;//make a and z vectors
a = (double*)malloc(sizeof(double)*bufferedA);
z = (double*)malloc(sizeof(double)*bufferedZ);
for(int i =0;i< N;i++){//Initialize Values
// a[i]= rand() % 10;
a[i] = i;
}
for(int i = 0;i<bufferSizeA;i++){//wrap around buffer.
a[N+i] =a[i];//added buffer values will be equal to first few variables in the array as stated in problem
}
for(int i =0;i< bufferedA;i++){//print values to screen
cout << a[i] << " ";
}
cout << endl;
double *dev_a,*dev_z;//create device side variables
hipMalloc((void**)&dev_a,sizeof(double)*bufferedA);
hipMalloc((void**)&dev_z,sizeof(double)*bufferedZ);
hipMemcpy(dev_a,a,sizeof(double)*bufferedA,hipMemcpyHostToDevice);
  dim3 gridSize(1);//number of blocks per grid; remember, everything must fit in a single block
dim3 blockSize(bufferedA/2);//number of threads per block
hipLaunchKernelGGL(( reduce), dim3(gridSize),dim3(blockSize), 0, 0, dev_a,dev_z,bufferedA,bufferedZ);
hipMemcpy(z,dev_z,sizeof(double)*bufferedZ,hipMemcpyDeviceToHost);
cout << "Reduced Matrix:" <<endl;
for(int i =0;i< F;i++){//output final reduced values
cout << z[i] << " ";
}
cout << endl;
hipFree(dev_a);
hipFree(dev_z);
} | 93b4e5125985bd84e8f9ec7894ebd457f72fca98.cu | #include <iostream>
#include <random>
using namespace std;
//This implementation is a binary-tree reduction, meaning that the input is buffered (padded) out to a power of 2 before reducing.
//This version only works for 2048 values and under so that everything fits in one block; a multi-block version for larger inputs is left as a follow-up.
#define N 2047//number of input values
#define R 1024//reduction factor
#define F N/R//how many values will be in the final output
//reduce uses every thread to average two values, then half of those threads to average the partial results, and so on until only sizeOut values remain.
//sizeIn and sizeOut must both be powers of 2
__global__ void reduce(double *a,double *z,int sizeIn, int sizeOut){
int tid = blockDim.x*blockIdx.x + threadIdx.x;
if(tid >=sizeIn /2) return;
  __shared__ double subTotals[1024];//sized for the largest block this kernel supports (bufferedA/2 threads); N/2 truncates for odd N and would be one entry short
  subTotals[tid]=(a[tid*2]+a[tid*2+1])/2;//average every two values using all threads
__syncthreads();
int level=2;
  while ((sizeIn/level) > sizeOut){//keep halving until only sizeOut partial results remain
if(tid % level==0){//use half threads every iteration
subTotals[tid]=(subTotals[tid]+subTotals[tid+(level/2)])/2;
}
__syncthreads();//we have to sync threads every time here :(
level = level * 2;
}
level = level /2;
if(tid % level==0){
z[tid/level] = subTotals[tid];
}
}
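// Note: the whole reduction runs inside a single block because the shared
// subTotals buffer and __syncthreads() only span one block; this is what
// limits this version to 2048 inputs (at most 1024 threads loading two values
// each).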
int main(){
int bufferedA = 1, bufferedZ =1;//size of arrays after buffering
while(N > bufferedA){//get closest power of 2 greater than A
bufferedA = bufferedA * 2;
}
int bufferSizeA = bufferedA - N;//amount that needs to be buffered
while(F > bufferedZ){//get closest power of 2 greater than Z
bufferedZ = bufferedZ * 2;
}
double *a,*z;//make a and z vectors
a = (double*)malloc(sizeof(double)*bufferedA);
z = (double*)malloc(sizeof(double)*bufferedZ);
for(int i =0;i< N;i++){//Initialize Values
// a[i]= rand() % 10;
a[i] = i;
}
for(int i = 0;i<bufferSizeA;i++){//wrap around buffer.
a[N+i] =a[i];//added buffer values will be equal to first few variables in the array as stated in problem
}
for(int i =0;i< bufferedA;i++){//print values to screen
cout << a[i] << " ";
}
cout << endl;
double *dev_a,*dev_z;//create device side variables
cudaMalloc((void**)&dev_a,sizeof(double)*bufferedA);
cudaMalloc((void**)&dev_z,sizeof(double)*bufferedZ);
cudaMemcpy(dev_a,a,sizeof(double)*bufferedA,cudaMemcpyHostToDevice);
  dim3 gridSize(1);//number of blocks per grid; remember, everything must fit in a single block
dim3 blockSize(bufferedA/2);//number of threads per block
reduce<<<gridSize,blockSize>>>(dev_a,dev_z,bufferedA,bufferedZ);
cudaMemcpy(z,dev_z,sizeof(double)*bufferedZ,cudaMemcpyDeviceToHost);
cout << "Reduced Matrix:" <<endl;
for(int i =0;i< F;i++){//output final reduced values
cout << z[i] << " ";
}
cout << endl;
cudaFree(dev_a);
cudaFree(dev_z);
} |
093ea546431bb55e09a24cb99075bb469c3cb092.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/cvtcolor.h"
#include "cvtcolor_compute.hpp"
#include "cvtcolor_memory.hpp"
#include "utility.hpp"
namespace ppl {
namespace cv {
namespace cuda {
// BGR(RGB) <-> BGRA(RGBA)
CVT_COLOR_2VECTORS_INVOCATION(BGR2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(RGB2RGBA, uchar3, uchar4, float3, float4)
CVT_COLOR_1VECTORS_INVOCATION(BGRA2BGR, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGBA2RGB, uchar4, uchar3, float4, float3)
CVT_COLOR_2VECTORS_INVOCATION(BGR2RGBA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(RGB2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_1VECTORS_INVOCATION(RGBA2BGR, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(BGRA2RGB, uchar4, uchar3, float4, float3)
// BGR <-> RGB
CVT_COLOR_1VECTORS_INVOCATION(BGR2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGB2BGR, uchar3, uchar3, float3, float3)
// BGRA <-> RGBA
CVT_COLOR_2VECTORS_INVOCATION(BGRA2RGBA, uchar4, uchar4, float4, float4)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2BGRA, uchar4, uchar4, float4, float4)
// BGR/RGB/BGRA/RGBA <-> Gray
CVT_COLOR_2VECTORS_INVOCATION(BGR2GRAY, uchar3, uchar, float3, float)
CVT_COLOR_2VECTORS_INVOCATION(RGB2GRAY, uchar3, uchar, float3, float)
CVT_COLOR_2VECTORS_INVOCATION(BGRA2GRAY, uchar4, uchar, float4, float)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2GRAY, uchar4, uchar, float4, float)
CVT_COLOR_1VECTORS_INVOCATION(GRAY2BGR, uchar, uchar3, float, float3)
CVT_COLOR_1VECTORS_INVOCATION(GRAY2RGB, uchar, uchar3, float, float3)
CVT_COLOR_2VECTORS_INVOCATION(GRAY2BGRA, uchar, uchar4, float, float4)
CVT_COLOR_2VECTORS_INVOCATION(GRAY2RGBA, uchar, uchar4, float, float4)
// BGR/RGB/BGRA/RGBA <-> YCrCb
CVT_COLOR_1VECTORS_INVOCATION(BGR2YCrCb, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGB2YCrCb, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(BGRA2YCrCb, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGBA2YCrCb, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(YCrCb2BGR, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(YCrCb2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(YCrCb2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(YCrCb2RGBA, uchar3, uchar4, float3, float4)
// BGR/RGB/BGRA/RGBA <-> HSV
CVT_COLOR_2VECTORS_INVOCATION(BGR2HSV, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGB2HSV, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(BGRA2HSV, uchar4, uchar3, float4, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2HSV, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(HSV2BGR, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(HSV2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(HSV2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(HSV2RGBA, uchar3, uchar4, float3, float4)
// BGR/RGB/BGRA/RGBA <-> LAB
CVT_COLOR_2VECTORS_INVOCATION(BGR2LAB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGB2LAB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(BGRA2LAB, uchar4, uchar3, float4, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2LAB, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(LAB2BGR, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(LAB2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(LAB2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_1VECTORS_INVOCATION(LAB2RGBA, uchar3, uchar4, float3, float4)
// BGR/RGB/BGRA/RGBA <-> NV12
CVT_COLOR_TO_NVXX_INVOCATION(BGR2NV12, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(RGB2NV12, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(BGRA2NV12, uchar4)
CVT_COLOR_TO_NVXX_INVOCATION(RGBA2NV12, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122BGR, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122RGB, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122BGRA, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122RGBA, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGR2NV12, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGB2NV12, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGRA2NV12, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGBA2NV12, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122BGR, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122RGB, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122BGRA, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122RGBA, uchar4)
// BGR/RGB/BGRA/RGBA <-> NV21
CVT_COLOR_TO_NVXX_INVOCATION(BGR2NV21, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(RGB2NV21, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(BGRA2NV21, uchar4)
CVT_COLOR_TO_NVXX_INVOCATION(RGBA2NV21, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212BGR, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212RGB, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212BGRA, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212RGBA, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGR2NV21, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGB2NV21, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGRA2NV21, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGBA2NV21, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212BGR, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212RGB, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212BGRA, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212RGBA, uchar4)
// BGR/RGB/BGRA/RGBA <-> I420
CVT_COLOR_TO_I420_INVOCATION(BGR2I420, uchar3)
CVT_COLOR_TO_I420_INVOCATION(RGB2I420, uchar3)
CVT_COLOR_TO_I420_INVOCATION(BGRA2I420, uchar4)
CVT_COLOR_TO_I420_INVOCATION(RGBA2I420, uchar4)
CVT_COLOR_FROM_I420_INVOCATION(I4202BGR, uchar3)
CVT_COLOR_FROM_I420_INVOCATION(I4202RGB, uchar3)
CVT_COLOR_FROM_I420_INVOCATION(I4202BGRA, uchar4)
CVT_COLOR_FROM_I420_INVOCATION(I4202RGBA, uchar4)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(BGR2I420, uchar3)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(RGB2I420, uchar3)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(BGRA2I420, uchar4)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(RGBA2I420, uchar4)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202BGR, uchar3)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202RGB, uchar3)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202BGRA, uchar4)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202RGBA, uchar4)
// BGR/GRAY <-> UYVY
CVT_COLOR_FROM_YUV422_UCHAR_INVOCATION(UYVY2BGR, uchar4, uchar3)
CVT_COLOR_YUV422_TO_GRAY_UCHAR_INVOCATION(UYVY2GRAY, uchar2)
// BGR/GRAY <-> YUYV
CVT_COLOR_FROM_YUV422_UCHAR_INVOCATION(YUYV2BGR, uchar4, uchar3)
CVT_COLOR_YUV422_TO_GRAY_UCHAR_INVOCATION(YUYV2GRAY, uchar2)
/******************* definition of YUV2 -> GRAY ********************/
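// Converting YUV to gray only needs the luma, so the kernels below simply
// copy the Y plane; the chroma data that follows it in the source buffer is
// never read.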
__global__
void cvtColorYUV2GRAYKernel0(const uchar* src, int rows, int cols,
int src_stride, uchar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const uchar4* input = (uchar4*)(src + element_y * src_stride);
uchar4 value = input[element_x];
uchar4* output = (uchar4*)(dst + element_y * dst_stride);
output[element_x] = value;
}
__global__
void cvtColorYUV2GRAYKernel1(const uchar* src, int rows, int cols,
int src_stride, uchar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int index_x = element_x << 2;
if (element_y >= rows || index_x >= cols) {
return;
}
const uchar* input = src + element_y * src_stride;
uchar* output = dst + element_y * dst_stride;
uchar value0, value1, value2, value3;
if (index_x < cols - 4) {
value0 = input[index_x];
value1 = input[index_x + 1];
value2 = input[index_x + 2];
value3 = input[index_x + 3];
output[index_x] = value0;
output[index_x + 1] = value1;
output[index_x + 2] = value2;
output[index_x + 3] = value3;
}
else {
value0 = input[index_x];
if (index_x < cols - 1) {
value1 = input[index_x + 1];
}
if (index_x < cols - 2) {
value2 = input[index_x + 2];
}
if (index_x < cols - 3) {
value3 = input[index_x + 3];
}
output[index_x] = value0;
if (index_x < cols - 1) {
output[index_x + 1] = value1;
}
if (index_x < cols - 2) {
output[index_x + 2] = value2;
}
if (index_x < cols - 3) {
output[index_x + 3] = value3;
}
}
}
RetCode YUV2GRAY(const uchar* src, int rows, int cols, int src_stride,
uchar* dst, int dst_stride, hipStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * (int)sizeof(uchar));
int columns = divideUp(cols, 4, 2);
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(columns, kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
int padded_stride = roundUp(cols, 4, 2) * sizeof(uchar);
if (dst_stride >= padded_stride) {
hipLaunchKernelGGL(( cvtColorYUV2GRAYKernel0), dim3(grid), dim3(block), 0, stream, src, rows, columns,
src_stride, dst, dst_stride);
}
else {
hipLaunchKernelGGL(( cvtColorYUV2GRAYKernel1), dim3(grid), dim3(block), 0, stream, src, rows, cols,
src_stride, dst, dst_stride);
}
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode YUV2GRAY<uchar>(hipStream_t stream,
int rows,
int cols,
int src_stride,
const uchar* src,
int dst_stride,
uchar* dst) {
RetCode code = YUV2GRAY(src, rows, cols, src_stride, dst, dst_stride, stream);
return code;
}
/******************* definition of NV12/21 <-> I420 ********************/
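// Layout reminder: NV12/NV21 store a full-resolution Y plane followed by one
// half-resolution plane of interleaved chroma pairs (UV for NV12, VU for
// NV21), while I420 stores Y followed by two separate half-resolution U and V
// planes. The kernels below copy Y untouched and reshuffle the chroma samples
// between the two layouts.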
__global__
void cvtColorNV2I420Kernel(const uchar* src_y, int rows, int cols,
int src_y_stride, const uchar* src_uv,
int src_uv_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_u, int dst_u_stride, uchar* dst_v,
int dst_v_stride, bool is_NV12) {
int index_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int index_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int element_x = index_x << 1;
int element_y = index_y << 1;
if (element_y >= rows || element_x >= cols) {
return;
}
uchar* input_y0 = (uchar*)src_y + element_y * src_y_stride;
uchar* input_y1 = input_y0 + src_y_stride;
uchar2* input_uv = (uchar2*)((uchar*)src_uv + index_y * src_uv_stride);
uchar* output_y0 = (uchar*)dst_y + element_y * dst_y_stride;
uchar* output_y1 = output_y0 + dst_y_stride;
uchar* output_u = (uchar*)((uchar*)dst_u + index_y * dst_u_stride);
uchar* output_v = (uchar*)((uchar*)dst_v + index_y * dst_v_stride);
uchar value_y00, value_y01, value_y10, value_y11;
value_y00 = input_y0[element_x];
value_y01 = input_y0[element_x + 1];
value_y10 = input_y1[element_x];
value_y11 = input_y1[element_x + 1];
uchar2 value_uv = input_uv[index_x];
output_y0[element_x] = value_y00;
output_y0[element_x + 1] = value_y01;
output_y1[element_x] = value_y10;
output_y1[element_x + 1] = value_y11;
if (is_NV12) {
output_u[index_x] = value_uv.x;
output_v[index_x] = value_uv.y;
}
else {
output_u[index_x] = value_uv.y;
output_v[index_x] = value_uv.x;
}
}
RetCode NV122I420(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_uv, int src_uv_stride, uchar* dst_y,
int dst_y_stride, uchar* dst_u, int dst_u_stride,
uchar* dst_v, int dst_v_stride, hipStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_uv != nullptr);
PPL_ASSERT(dst_u != nullptr);
PPL_ASSERT(dst_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_uv_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_v_stride >= cols / 2 * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
hipLaunchKernelGGL(( cvtColorNV2I420Kernel), dim3(grid), dim3(block), 0, stream, src_y, rows, cols,
src_y_stride, src_uv, src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, true);
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode NV122I420<uchar>(hipStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_uv_stride,
const uchar* src_uv,
int dst_y_stride,
uchar* dst_y,
int dst_u_stride,
uchar* dst_u,
int dst_v_stride,
uchar* dst_v) {
RetCode code = NV122I420(src_y, rows, cols, src_y_stride, src_uv,
src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, stream);
return code;
}
RetCode NV212I420(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_uv, int src_uv_stride, uchar* dst_y,
int dst_y_stride, uchar* dst_u, int dst_u_stride,
uchar* dst_v, int dst_v_stride, hipStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_uv != nullptr);
PPL_ASSERT(dst_u != nullptr);
PPL_ASSERT(dst_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_uv_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_v_stride >= cols / 2 * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
hipLaunchKernelGGL(( cvtColorNV2I420Kernel), dim3(grid), dim3(block), 0, stream, src_y, rows, cols,
src_y_stride, src_uv, src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, false);
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode NV212I420<uchar>(hipStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_uv_stride,
const uchar* src_uv,
int dst_y_stride,
uchar* dst_y,
int dst_u_stride,
uchar* dst_u,
int dst_v_stride,
uchar* dst_v) {
RetCode code = NV212I420(src_y, rows, cols, src_y_stride, src_uv,
src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, stream);
return code;
}
__global__
void cvtColorI4202NVKernel(const uchar* src_y, int rows, int cols,
int src_y_stride, const uchar* src_u,
int src_u_stride, const uchar* src_v,
int src_v_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_uv, int dst_uv_stride, bool is_NV12) {
int index_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int index_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int element_x = index_x << 1;
int element_y = index_y << 1;
if (element_y >= rows || element_x >= cols) {
return;
}
uchar* input_y0 = (uchar*)src_y + element_y * src_y_stride;
uchar* input_y1 = input_y0 + src_y_stride;
uchar* input_u = (uchar*)((uchar*)src_u + index_y * src_u_stride);
uchar* input_v = (uchar*)((uchar*)src_v + index_y * src_v_stride);
uchar* output_y0 = (uchar*)dst_y + element_y * dst_y_stride;
uchar* output_y1 = output_y0 + dst_y_stride;
uchar2* output_uv = (uchar2*)((uchar*)dst_uv + index_y * dst_uv_stride);
uchar value_y00, value_y01, value_y10, value_y11;
value_y00 = input_y0[element_x];
value_y01 = input_y0[element_x + 1];
value_y10 = input_y1[element_x];
value_y11 = input_y1[element_x + 1];
uchar value_u = input_u[index_x];
uchar value_v = input_v[index_x];
output_y0[element_x] = value_y00;
output_y0[element_x + 1] = value_y01;
output_y1[element_x] = value_y10;
output_y1[element_x + 1] = value_y11;
if (is_NV12) {
output_uv[index_x] = make_uchar2(value_u, value_v);
}
else {
output_uv[index_x] = make_uchar2(value_v, value_u);
}
}
RetCode I4202NV12(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_u, int src_u_stride, const uchar* src_v,
int src_v_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_uv, int dst_uv_stride, hipStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_u != nullptr);
PPL_ASSERT(src_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(dst_uv != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(src_v_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_uv_stride >= cols * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
hipLaunchKernelGGL(( cvtColorI4202NVKernel), dim3(grid), dim3(block), 0, stream, src_y, rows, cols,
src_y_stride, src_u, src_u_stride, src_v, src_v_stride, dst_y,
dst_y_stride, dst_uv, dst_uv_stride, true);
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode I4202NV12<uchar>(hipStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_u_stride,
const uchar* src_u,
int src_v_stride,
const uchar* src_v,
int dst_y_stride,
uchar* dst_y,
int dst_uv_stride,
uchar* dst_uv) {
RetCode code = I4202NV12(src_y, rows, cols, src_y_stride, src_u, src_u_stride,
src_v, src_v_stride, dst_y, dst_y_stride, dst_uv,
dst_uv_stride, stream);
return code;
}
RetCode I4202NV21(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_u, int src_u_stride, const uchar* src_v,
int src_v_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_uv, int dst_uv_stride, hipStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_u != nullptr);
PPL_ASSERT(src_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(dst_uv != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(src_v_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_uv_stride >= cols * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
hipLaunchKernelGGL(( cvtColorI4202NVKernel), dim3(grid), dim3(block), 0, stream, src_y, rows, cols,
src_y_stride, src_u, src_u_stride, src_v, src_v_stride, dst_y,
dst_y_stride, dst_uv, dst_uv_stride, false);
hipError_t code = hipGetLastError();
if (code != hipSuccess) {
LOG(ERROR) << "CUDA error: " << hipGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode I4202NV21<uchar>(hipStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_u_stride,
const uchar* src_u,
int src_v_stride,
const uchar* src_v,
int dst_y_stride,
uchar* dst_y,
int dst_uv_stride,
uchar* dst_uv) {
RetCode code = I4202NV21(src_y, rows, cols, src_y_stride, src_u, src_u_stride,
src_v, src_v_stride, dst_y, dst_y_stride, dst_uv,
dst_uv_stride, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
| 093ea546431bb55e09a24cb99075bb469c3cb092.cu | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#include "ppl/cv/cuda/cvtcolor.h"
#include "cvtcolor_compute.hpp"
#include "cvtcolor_memory.hpp"
#include "utility.hpp"
namespace ppl {
namespace cv {
namespace cuda {
// BGR(RGB) <-> BGRA(RGBA)
CVT_COLOR_2VECTORS_INVOCATION(BGR2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(RGB2RGBA, uchar3, uchar4, float3, float4)
CVT_COLOR_1VECTORS_INVOCATION(BGRA2BGR, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGBA2RGB, uchar4, uchar3, float4, float3)
CVT_COLOR_2VECTORS_INVOCATION(BGR2RGBA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(RGB2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_1VECTORS_INVOCATION(RGBA2BGR, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(BGRA2RGB, uchar4, uchar3, float4, float3)
// BGR <-> RGB
CVT_COLOR_1VECTORS_INVOCATION(BGR2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGB2BGR, uchar3, uchar3, float3, float3)
// BGRA <-> RGBA
CVT_COLOR_2VECTORS_INVOCATION(BGRA2RGBA, uchar4, uchar4, float4, float4)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2BGRA, uchar4, uchar4, float4, float4)
// BGR/RGB/BGRA/RGBA <-> Gray
CVT_COLOR_2VECTORS_INVOCATION(BGR2GRAY, uchar3, uchar, float3, float)
CVT_COLOR_2VECTORS_INVOCATION(RGB2GRAY, uchar3, uchar, float3, float)
CVT_COLOR_2VECTORS_INVOCATION(BGRA2GRAY, uchar4, uchar, float4, float)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2GRAY, uchar4, uchar, float4, float)
CVT_COLOR_1VECTORS_INVOCATION(GRAY2BGR, uchar, uchar3, float, float3)
CVT_COLOR_1VECTORS_INVOCATION(GRAY2RGB, uchar, uchar3, float, float3)
CVT_COLOR_2VECTORS_INVOCATION(GRAY2BGRA, uchar, uchar4, float, float4)
CVT_COLOR_2VECTORS_INVOCATION(GRAY2RGBA, uchar, uchar4, float, float4)
// BGR/RGB/BGRA/RGBA <-> YCrCb
CVT_COLOR_1VECTORS_INVOCATION(BGR2YCrCb, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGB2YCrCb, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(BGRA2YCrCb, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(RGBA2YCrCb, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(YCrCb2BGR, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(YCrCb2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(YCrCb2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(YCrCb2RGBA, uchar3, uchar4, float3, float4)
// BGR/RGB/BGRA/RGBA <-> HSV
CVT_COLOR_2VECTORS_INVOCATION(BGR2HSV, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGB2HSV, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(BGRA2HSV, uchar4, uchar3, float4, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2HSV, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(HSV2BGR, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(HSV2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(HSV2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_2VECTORS_INVOCATION(HSV2RGBA, uchar3, uchar4, float3, float4)
// BGR/RGB/BGRA/RGBA <-> LAB
CVT_COLOR_2VECTORS_INVOCATION(BGR2LAB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGB2LAB, uchar3, uchar3, float3, float3)
CVT_COLOR_2VECTORS_INVOCATION(BGRA2LAB, uchar4, uchar3, float4, float3)
CVT_COLOR_2VECTORS_INVOCATION(RGBA2LAB, uchar4, uchar3, float4, float3)
CVT_COLOR_1VECTORS_INVOCATION(LAB2BGR, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(LAB2RGB, uchar3, uchar3, float3, float3)
CVT_COLOR_1VECTORS_INVOCATION(LAB2BGRA, uchar3, uchar4, float3, float4)
CVT_COLOR_1VECTORS_INVOCATION(LAB2RGBA, uchar3, uchar4, float3, float4)
// BGR/RGB/BGRA/RGBA <-> NV12
CVT_COLOR_TO_NVXX_INVOCATION(BGR2NV12, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(RGB2NV12, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(BGRA2NV12, uchar4)
CVT_COLOR_TO_NVXX_INVOCATION(RGBA2NV12, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122BGR, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122RGB, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122BGRA, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV122RGBA, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGR2NV12, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGB2NV12, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGRA2NV12, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGBA2NV12, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122BGR, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122RGB, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122BGRA, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV122RGBA, uchar4)
// BGR/RGB/BGRA/RGBA <-> NV21
CVT_COLOR_TO_NVXX_INVOCATION(BGR2NV21, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(RGB2NV21, uchar3)
CVT_COLOR_TO_NVXX_INVOCATION(BGRA2NV21, uchar4)
CVT_COLOR_TO_NVXX_INVOCATION(RGBA2NV21, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212BGR, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212RGB, uchar3)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212BGRA, uchar4)
CVT_COLOR_FROM_NVXX_INVOCATION(NV212RGBA, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGR2NV21, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGB2NV21, uchar3)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(BGRA2NV21, uchar4)
CVT_COLOR_TO_DISCRETE_NVXX_INVOCATION(RGBA2NV21, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212BGR, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212RGB, uchar3)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212BGRA, uchar4)
CVT_COLOR_FROM_DISCRETE_NVXX_INVOCATION(NV212RGBA, uchar4)
// BGR/RGB/BGRA/RGBA <-> I420
CVT_COLOR_TO_I420_INVOCATION(BGR2I420, uchar3)
CVT_COLOR_TO_I420_INVOCATION(RGB2I420, uchar3)
CVT_COLOR_TO_I420_INVOCATION(BGRA2I420, uchar4)
CVT_COLOR_TO_I420_INVOCATION(RGBA2I420, uchar4)
CVT_COLOR_FROM_I420_INVOCATION(I4202BGR, uchar3)
CVT_COLOR_FROM_I420_INVOCATION(I4202RGB, uchar3)
CVT_COLOR_FROM_I420_INVOCATION(I4202BGRA, uchar4)
CVT_COLOR_FROM_I420_INVOCATION(I4202RGBA, uchar4)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(BGR2I420, uchar3)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(RGB2I420, uchar3)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(BGRA2I420, uchar4)
CVT_COLOR_TO_DISCRETE_I420_INVOCATION(RGBA2I420, uchar4)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202BGR, uchar3)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202RGB, uchar3)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202BGRA, uchar4)
CVT_COLOR_FROM_DISCRETE_I420_INVOCATION(I4202RGBA, uchar4)
// BGR/GRAY <-> UYVY
CVT_COLOR_FROM_YUV422_UCHAR_INVOCATION(UYVY2BGR, uchar4, uchar3)
CVT_COLOR_YUV422_TO_GRAY_UCHAR_INVOCATION(UYVY2GRAY, uchar2)
// BGR/GRAY <-> YUYV
CVT_COLOR_FROM_YUV422_UCHAR_INVOCATION(YUYV2BGR, uchar4, uchar3)
CVT_COLOR_YUV422_TO_GRAY_UCHAR_INVOCATION(YUYV2GRAY, uchar2)
/******************* definition of YUV2 -> GRAY ********************/
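// Two copy variants are used below: kernel0 moves four luma pixels per thread
// as uchar4 and is chosen when the destination stride can hold the row width
// rounded up to a multiple of 4; kernel1 is the element-wise fallback that
// guards the ragged tail.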
__global__
void cvtColorYUV2GRAYKernel0(const uchar* src, int rows, int cols,
int src_stride, uchar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
const uchar4* input = (uchar4*)(src + element_y * src_stride);
uchar4 value = input[element_x];
uchar4* output = (uchar4*)(dst + element_y * dst_stride);
output[element_x] = value;
}
__global__
void cvtColorYUV2GRAYKernel1(const uchar* src, int rows, int cols,
int src_stride, uchar* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int index_x = element_x << 2;
if (element_y >= rows || index_x >= cols) {
return;
}
const uchar* input = src + element_y * src_stride;
uchar* output = dst + element_y * dst_stride;
uchar value0, value1, value2, value3;
if (index_x < cols - 4) {
value0 = input[index_x];
value1 = input[index_x + 1];
value2 = input[index_x + 2];
value3 = input[index_x + 3];
output[index_x] = value0;
output[index_x + 1] = value1;
output[index_x + 2] = value2;
output[index_x + 3] = value3;
}
else {
value0 = input[index_x];
if (index_x < cols - 1) {
value1 = input[index_x + 1];
}
if (index_x < cols - 2) {
value2 = input[index_x + 2];
}
if (index_x < cols - 3) {
value3 = input[index_x + 3];
}
output[index_x] = value0;
if (index_x < cols - 1) {
output[index_x + 1] = value1;
}
if (index_x < cols - 2) {
output[index_x + 2] = value2;
}
if (index_x < cols - 3) {
output[index_x + 3] = value3;
}
}
}
RetCode YUV2GRAY(const uchar* src, int rows, int cols, int src_stride,
uchar* dst, int dst_stride, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * (int)sizeof(uchar));
int columns = divideUp(cols, 4, 2);
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(columns, kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
int padded_stride = roundUp(cols, 4, 2) * sizeof(uchar);
if (dst_stride >= padded_stride) {
cvtColorYUV2GRAYKernel0<<<grid, block, 0, stream>>>(src, rows, columns,
src_stride, dst, dst_stride);
}
else {
cvtColorYUV2GRAYKernel1<<<grid, block, 0, stream>>>(src, rows, cols,
src_stride, dst, dst_stride);
}
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode YUV2GRAY<uchar>(cudaStream_t stream,
int rows,
int cols,
int src_stride,
const uchar* src,
int dst_stride,
uchar* dst) {
RetCode code = YUV2GRAY(src, rows, cols, src_stride, dst, dst_stride, stream);
return code;
}
/******************* definition of NV12/21 <-> I420 ********************/
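// Each thread of the kernels below handles a 2x2 block of Y samples plus the
// single chroma sample pair shared by that block, which is why the grid is
// built from the half-resolution (cols/2, rows/2) extents.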
__global__
void cvtColorNV2I420Kernel(const uchar* src_y, int rows, int cols,
int src_y_stride, const uchar* src_uv,
int src_uv_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_u, int dst_u_stride, uchar* dst_v,
int dst_v_stride, bool is_NV12) {
int index_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int index_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int element_x = index_x << 1;
int element_y = index_y << 1;
if (element_y >= rows || element_x >= cols) {
return;
}
uchar* input_y0 = (uchar*)src_y + element_y * src_y_stride;
uchar* input_y1 = input_y0 + src_y_stride;
uchar2* input_uv = (uchar2*)((uchar*)src_uv + index_y * src_uv_stride);
uchar* output_y0 = (uchar*)dst_y + element_y * dst_y_stride;
uchar* output_y1 = output_y0 + dst_y_stride;
uchar* output_u = (uchar*)((uchar*)dst_u + index_y * dst_u_stride);
uchar* output_v = (uchar*)((uchar*)dst_v + index_y * dst_v_stride);
uchar value_y00, value_y01, value_y10, value_y11;
value_y00 = input_y0[element_x];
value_y01 = input_y0[element_x + 1];
value_y10 = input_y1[element_x];
value_y11 = input_y1[element_x + 1];
uchar2 value_uv = input_uv[index_x];
output_y0[element_x] = value_y00;
output_y0[element_x + 1] = value_y01;
output_y1[element_x] = value_y10;
output_y1[element_x + 1] = value_y11;
if (is_NV12) {
output_u[index_x] = value_uv.x;
output_v[index_x] = value_uv.y;
}
else {
output_u[index_x] = value_uv.y;
output_v[index_x] = value_uv.x;
}
}
RetCode NV122I420(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_uv, int src_uv_stride, uchar* dst_y,
int dst_y_stride, uchar* dst_u, int dst_u_stride,
uchar* dst_v, int dst_v_stride, cudaStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_uv != nullptr);
PPL_ASSERT(dst_u != nullptr);
PPL_ASSERT(dst_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_uv_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_v_stride >= cols / 2 * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
cvtColorNV2I420Kernel<<<grid, block, 0, stream>>>(src_y, rows, cols,
src_y_stride, src_uv, src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, true);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode NV122I420<uchar>(cudaStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_uv_stride,
const uchar* src_uv,
int dst_y_stride,
uchar* dst_y,
int dst_u_stride,
uchar* dst_u,
int dst_v_stride,
uchar* dst_v) {
RetCode code = NV122I420(src_y, rows, cols, src_y_stride, src_uv,
src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, stream);
return code;
}
RetCode NV212I420(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_uv, int src_uv_stride, uchar* dst_y,
int dst_y_stride, uchar* dst_u, int dst_u_stride,
uchar* dst_v, int dst_v_stride, cudaStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_uv != nullptr);
PPL_ASSERT(dst_u != nullptr);
PPL_ASSERT(dst_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
  PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_uv_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_v_stride >= cols / 2 * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
cvtColorNV2I420Kernel<<<grid, block, 0, stream>>>(src_y, rows, cols,
src_y_stride, src_uv, src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, false);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode NV212I420<uchar>(cudaStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_uv_stride,
const uchar* src_uv,
int dst_y_stride,
uchar* dst_y,
int dst_u_stride,
uchar* dst_u,
int dst_v_stride,
uchar* dst_v) {
RetCode code = NV212I420(src_y, rows, cols, src_y_stride, src_uv,
src_uv_stride, dst_y, dst_y_stride, dst_u,
dst_u_stride, dst_v, dst_v_stride, stream);
return code;
}
__global__
void cvtColorI4202NVKernel(const uchar* src_y, int rows, int cols,
int src_y_stride, const uchar* src_u,
int src_u_stride, const uchar* src_v,
int src_v_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_uv, int dst_uv_stride, bool is_NV12) {
int index_x = (blockIdx.x << kBlockShiftX0) + threadIdx.x;
int index_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
int element_x = index_x << 1;
int element_y = index_y << 1;
if (element_y >= rows || element_x >= cols) {
return;
}
uchar* input_y0 = (uchar*)src_y + element_y * src_y_stride;
uchar* input_y1 = input_y0 + src_y_stride;
uchar* input_u = (uchar*)((uchar*)src_u + index_y * src_u_stride);
uchar* input_v = (uchar*)((uchar*)src_v + index_y * src_v_stride);
uchar* output_y0 = (uchar*)dst_y + element_y * dst_y_stride;
uchar* output_y1 = output_y0 + dst_y_stride;
uchar2* output_uv = (uchar2*)((uchar*)dst_uv + index_y * dst_uv_stride);
uchar value_y00, value_y01, value_y10, value_y11;
value_y00 = input_y0[element_x];
value_y01 = input_y0[element_x + 1];
value_y10 = input_y1[element_x];
value_y11 = input_y1[element_x + 1];
uchar value_u = input_u[index_x];
uchar value_v = input_v[index_x];
output_y0[element_x] = value_y00;
output_y0[element_x + 1] = value_y01;
output_y1[element_x] = value_y10;
output_y1[element_x + 1] = value_y11;
if (is_NV12) {
output_uv[index_x] = make_uchar2(value_u, value_v);
}
else {
output_uv[index_x] = make_uchar2(value_v, value_u);
}
}
RetCode I4202NV12(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_u, int src_u_stride, const uchar* src_v,
int src_v_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_uv, int dst_uv_stride, cudaStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_u != nullptr);
PPL_ASSERT(src_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(dst_uv != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(src_v_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_uv_stride >= cols * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
cvtColorI4202NVKernel<<<grid, block, 0, stream>>>(src_y, rows, cols,
src_y_stride, src_u, src_u_stride, src_v, src_v_stride, dst_y,
dst_y_stride, dst_uv, dst_uv_stride, true);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode I4202NV12<uchar>(cudaStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_u_stride,
const uchar* src_u,
int src_v_stride,
const uchar* src_v,
int dst_y_stride,
uchar* dst_y,
int dst_uv_stride,
uchar* dst_uv) {
RetCode code = I4202NV12(src_y, rows, cols, src_y_stride, src_u, src_u_stride,
src_v, src_v_stride, dst_y, dst_y_stride, dst_uv,
dst_uv_stride, stream);
return code;
}
RetCode I4202NV21(const uchar* src_y, int rows, int cols, int src_y_stride,
const uchar* src_u, int src_u_stride, const uchar* src_v,
int src_v_stride, uchar* dst_y, int dst_y_stride,
uchar* dst_uv, int dst_uv_stride, cudaStream_t stream) {
PPL_ASSERT(src_y != nullptr);
PPL_ASSERT(src_u != nullptr);
PPL_ASSERT(src_v != nullptr);
PPL_ASSERT(dst_y != nullptr);
PPL_ASSERT(dst_uv != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(rows % 2 == 0 && cols % 2 == 0);
PPL_ASSERT(src_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(src_u_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(src_v_stride >= cols / 2 * (int)sizeof(uchar));
PPL_ASSERT(dst_y_stride >= cols * (int)sizeof(uchar));
PPL_ASSERT(dst_uv_stride >= cols * (int)sizeof(uchar));
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(cols, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(divideUp(rows, 2, 1), kBlockDimY0, kBlockShiftY0);
cvtColorI4202NVKernel<<<grid, block, 0, stream>>>(src_y, rows, cols,
src_y_stride, src_u, src_u_stride, src_v, src_v_stride, dst_y,
dst_y_stride, dst_uv, dst_uv_stride, false);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode I4202NV21<uchar>(cudaStream_t stream,
int rows,
int cols,
int src_y_stride,
const uchar* src_y,
int src_u_stride,
const uchar* src_u,
int src_v_stride,
const uchar* src_v,
int dst_y_stride,
uchar* dst_y,
int dst_uv_stride,
uchar* dst_uv) {
RetCode code = I4202NV21(src_y, rows, cols, src_y_stride, src_u, src_u_stride,
src_v, src_v_stride, dst_y, dst_y_stride, dst_uv,
dst_uv_stride, stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
7b45eb90f78eb4cb7fb6d5027d7767a5ae114f8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/unique.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include "util.h"
int main(void)
{
// generate 1G (1024 << 20) random numbers serially
thrust::host_vector<int> h_vec(1024 << 20);
// thrust::host_vector<int> h_vec(1024 << 8);
std::generate(h_vec.begin(), h_vec.end(), rand);
static int counter;
static int size;
size = h_vec.size();
std::cout << "size:" << size << std::endl;
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
// sort data on the device (846M keys per second on GeForce GTX 480)
thrust::sort(d_vec.begin(), d_vec.end());
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
thrust::unique(d_vec.begin(),d_vec.end());
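// Note: thrust::unique removes only consecutive duplicates (hence the sort above) and returns
// an iterator to the new logical end; it does not resize d_vec, so the size printed below is unchanged.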
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to reorder array and points to the first repeated value: %3.1f ms\n", elapsedTime );
size = d_vec.size();
std::cout << "size:" << size << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
/*
counter = 0;
for(auto itr = h_vec.begin(); itr != h_vec.end(); ++itr)
{
if(counter % 10000==0)
std::cout << *itr << std::endl;
counter = counter + 1;
}
*/
return 0;
}
| 7b45eb90f78eb4cb7fb6d5027d7767a5ae114f8b.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/unique.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include "util.h"
int main(void)
{
// generate 1G (1024 << 20) random numbers serially
thrust::host_vector<int> h_vec(1024 << 20);
// thrust::host_vector<int> h_vec(1024 << 8);
std::generate(h_vec.begin(), h_vec.end(), rand);
static int counter;
static int size;
size = h_vec.size();
std::cout << "size:" << size << std::endl;
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
// sort data on the device (846M keys per second on GeForce GTX 480)
thrust::sort(d_vec.begin(), d_vec.end());
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
thrust::unique(d_vec.begin(),d_vec.end());
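// Note: thrust::unique removes only consecutive duplicates (hence the sort above) and returns
// an iterator to the new logical end; it does not resize d_vec, so the size printed below is unchanged.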
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to reorder array and points to the first repeated value: %3.1f ms\n", elapsedTime );
size = d_vec.size();
std::cout << "size:" << size << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
/*
counter = 0;
for(auto itr = h_vec.begin(); itr != h_vec.end(); ++itr)
{
if(counter % 10000==0)
std::cout << *itr << std::endl;
counter = counter + 1;
}
*/
return 0;
}
|
ffdf2cf121e177468f22539d625c5a7255eb983b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_fp16.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <chrono>
#include "common_struct.h"
#include "common.h"
#include "model_init.h"
#include <iostream>
using namespace std;
__global__ void init_rand_state(hiprandState_t*state, int size)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < size)hiprand_init(clock() + tid, tid, 0, &state[tid]);
// if(tid < size)hiprand_init(1, tid, 0, &state[tid]);
}
__global__ void init_rand_feature_single(hiprandState_t*state, unsigned int state_size, float* array , unsigned int dim, unsigned int k)
{
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int number_threads = gridDim.x*blockDim.x;
if (state_size > tid){
for (unsigned int i = tid; i < dim * k; i+= state_size){
array[i] = (float)((hiprand_normal_double(&state[tid])* 0.01)) ;
}
}
}
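// One warp per entity: converts the entity's parameters from float to half and writes them into
// its group's buffer at the slot given by the sorted index
// (this variant assumes k == 128; the _k64 variant below handles k == 64).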
__global__ void cpyparams2grouped_params_for_comparison_indexing(float* original_params, __half** converted_params, unsigned int *group_end_idx, unsigned int *entity2group, unsigned int *entity2sorted_idx, unsigned int k, unsigned int n, unsigned int group_num){
extern __shared__ unsigned int end_idx_s[];
for (int i = threadIdx.x; i < group_num; i+= blockDim.x){
end_idx_s[i+1] = group_end_idx[i];
}
if (threadIdx.x == 0){
end_idx_s[0] = -1;
}
__syncthreads();
unsigned int g_wid = (blockIdx.x*blockDim.x + threadIdx.x)/32;
unsigned int num_w = gridDim.x*blockDim.x/32;
unsigned int lane_id = threadIdx.x%32;
for (; g_wid < n; g_wid += num_w){
unsigned int group_idx = entity2group[g_wid];
unsigned int base_idx = (entity2sorted_idx[g_wid]- (end_idx_s[group_idx] + 1)) * k;
((__half*)converted_params[group_idx])[base_idx + lane_id] = __float2half_rn(original_params[g_wid * k + lane_id]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 32] = __float2half_rn(original_params[g_wid * k + lane_id + 32]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 64] = __float2half_rn(original_params[g_wid * k + lane_id + 64]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 96] = __float2half_rn(original_params[g_wid * k + lane_id + 96]);
}
}
__global__ void cpyparams2grouped_params_for_comparison_indexing_k64(float* original_params, __half** converted_params, unsigned int *group_end_idx, unsigned int *entity2group, unsigned int *entity2sorted_idx, unsigned int k, unsigned int n, unsigned int group_num){
extern __shared__ unsigned int end_idx_s[];
// if (threadIdx.x < group_num){ end_idx_s[threadIdx.x+1] = group_end_idx[threadIdx.x];}
for (int i = threadIdx.x; i < group_num; i+= blockDim.x){
end_idx_s[i+1] = group_end_idx[i];
}
if (threadIdx.x == 0){
end_idx_s[0] = -1;
}
__syncthreads();
unsigned int g_wid = (blockIdx.x*blockDim.x + threadIdx.x)/32;
unsigned int num_w = gridDim.x*blockDim.x/32;
unsigned int lane_id = threadIdx.x%32;
for (; g_wid < n; g_wid += num_w){
unsigned int group_idx = entity2group[g_wid];
unsigned int base_idx = (entity2sorted_idx[g_wid]- (end_idx_s[group_idx] + 1)) * k;
((__half*)converted_params[group_idx])[base_idx + lane_id] = __float2half_rn(original_params[g_wid * k + lane_id]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 32] = __float2half_rn(original_params[g_wid * k + lane_id + 32]);
}
}
__global__ void transform_half2float(float *gpu_float_feature, half *gpu_half_feature, unsigned int vec_size)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
int number_threads = gridDim.x*blockDim.x;
for (unsigned int i = tid; i < vec_size; i += number_threads){
if (i < vec_size)
gpu_float_feature[i] = __half2float(gpu_half_feature[i]);
}
}
__global__ void transform_float2half(__half* half_feature, float *gpu_float_feature, unsigned int vec_size){
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
int number_threads = gridDim.x*blockDim.x;
for (unsigned int i = tid; i < vec_size; i += number_threads){
if (i < vec_size){
half_feature[i] = __float2half_rn(gpu_float_feature[i]);
}
}
}
__global__ void mem_cpy_fp16tofp32(float* out, __half* in, int n){
int i = threadIdx.x + blockIdx.x * blockDim.x;
for (; i < n; i += gridDim.x * blockDim.x)
out[i] = __half2float(in[i]);
}
void init_features_single(float *feature_vec, unsigned int dim, unsigned int k){
float* gpu_vec;
hipMalloc(&gpu_vec, sizeof(float) * dim * k);
unsigned int workers = 3200;
hiprandState_t* d_state;
int state_size = workers * 32;
hipMalloc(&d_state, sizeof(hiprandState_t) * state_size);
hipLaunchKernelGGL(( init_rand_state), dim3((state_size + 255)/256), dim3(256), 0, 0, d_state, state_size);
hipDeviceSynchronize();
gpuErr(hipPeekAtLastError());
hipLaunchKernelGGL(( init_rand_feature_single), dim3((state_size + 255)/256), dim3(256), 0, 0, d_state, state_size, gpu_vec, dim, k);
hipDeviceSynchronize();
hipMemcpy(feature_vec, gpu_vec, sizeof(float)*dim*k, hipMemcpyDeviceToHost);
gpuErr(hipPeekAtLastError());
hipFree(d_state);
hipFree(gpu_vec);
}
void init_model_single(Mf_info *mf_info, SGD *sgd_info){
hipHostMalloc(&sgd_info->p, sizeof(float) * mf_info->max_user * mf_info->params.k);
hipHostMalloc(&sgd_info->q, sizeof(float) * mf_info->max_item * mf_info->params.k);
gpuErr(hipPeekAtLastError());
init_features_single(sgd_info->p, mf_info->max_user, mf_info->params.k);
init_features_single(sgd_info->q, mf_info->max_item, mf_info->params.k);
hipMalloc(&sgd_info->d_p, sizeof(float) * mf_info->max_user * mf_info->params.k);
hipMalloc(&sgd_info->d_q, sizeof(float) * mf_info->max_item * mf_info->params.k);
hipMemcpy(sgd_info->d_p, sgd_info->p, sizeof(float) * mf_info->max_user * mf_info->params.k, hipMemcpyHostToDevice);
hipMemcpy(sgd_info->d_q, sgd_info->q, sizeof(float) * mf_info->max_item * mf_info->params.k, hipMemcpyHostToDevice);
}
void init_model_half(Mf_info *mf_info, SGD *sgd_info){
hipHostMalloc(&sgd_info->p, sizeof(float) * mf_info->max_user * mf_info->params.k);
hipHostMalloc(&sgd_info->q, sizeof(float) * mf_info->max_item * mf_info->params.k);
gpuErr(hipPeekAtLastError());
hipHostMalloc(&sgd_info->half_p, sizeof(short) * mf_info->max_user * mf_info->params.k);
hipHostMalloc(&sgd_info->half_q, sizeof(short) * mf_info->max_item * mf_info->params.k);
gpuErr(hipPeekAtLastError());
init_features_single(sgd_info->p, mf_info->max_user, mf_info->params.k);
init_features_single(sgd_info->q, mf_info->max_item, mf_info->params.k);
conversion_features_half(sgd_info->half_p, sgd_info->p ,mf_info->max_user, mf_info->params.k);
conversion_features_half(sgd_info->half_q, sgd_info->q ,mf_info->max_item, mf_info->params.k);
hipMalloc(&sgd_info->d_half_p, sizeof(short) * mf_info->max_user * mf_info->params.k);
hipMalloc(&sgd_info->d_half_q, sizeof(short) * mf_info->max_item * mf_info->params.k);
hipMemcpy(sgd_info->d_half_p, sgd_info->half_p, sizeof(short) * mf_info->max_user * mf_info->params.k, hipMemcpyHostToDevice);
hipMemcpy(sgd_info->d_half_q, sgd_info->half_q, sizeof(short) * mf_info->max_item * mf_info->params.k, hipMemcpyHostToDevice);
}
void cpy2grouped_parameters_gpu_for_comparison_indexing(Mf_info *mf_info, SGD *sgd_info){
double cpy2grouped_parameters_exec_time = 0;
unsigned int* d_user_group_idx;
unsigned int* d_item_group_idx;
hipMalloc(&d_user_group_idx, sizeof(unsigned int) * mf_info->max_user);
hipMalloc(&d_item_group_idx, sizeof(unsigned int) * mf_info->max_item);
hipMemcpy(d_user_group_idx, mf_info->user_group_idx, sizeof(unsigned int) * mf_info->max_user, hipMemcpyHostToDevice);
hipMemcpy(d_item_group_idx, mf_info->item_group_idx, sizeof(unsigned int) * mf_info->max_item, hipMemcpyHostToDevice);
for (int i = 0; i < mf_info->params.user_group_num; i++){
unsigned int group_params_size = mf_info->user_group_size[i] * mf_info->params.k;
hipMalloc((void**)&sgd_info->user_group_d_ptr[i], sizeof(__half) * group_params_size);
hipHostMalloc(&sgd_info->user_group_ptr[i], sizeof(__half)*group_params_size);
}
for (int i = 0; i < mf_info->params.item_group_num; i++){
unsigned int group_params_size = mf_info->item_group_size[i] * mf_info->params.k;
hipMalloc((void**)&sgd_info->item_group_d_ptr[i], sizeof(__half) * group_params_size);
hipHostMalloc(&sgd_info->item_group_ptr[i], sizeof(__half)*group_params_size);
}
hipMemcpy(sgd_info->d_user_group_ptr, sgd_info->user_group_d_ptr, sizeof(void**) * mf_info->params.user_group_num, hipMemcpyHostToDevice);
hipMemcpy(sgd_info->d_item_group_ptr, sgd_info->item_group_d_ptr, sizeof(void**) * mf_info->params.item_group_num, hipMemcpyHostToDevice);
gpuErr(hipPeekAtLastError());
unsigned int w_num = 2048;
unsigned int block_size = 256;
if (mf_info->params.k == 128){
hipLaunchKernelGGL(( cpyparams2grouped_params_for_comparison_indexing), dim3((w_num)/(block_size/32)), dim3(block_size), sizeof(unsigned int)*(mf_info->params.user_group_num+1), 0, sgd_info->d_p, (__half**)sgd_info->d_user_group_ptr, mf_info->d_user_group_end_idx, d_user_group_idx, mf_info->d_user2sorted_idx, mf_info->params.k, mf_info->max_user, mf_info->params.user_group_num);
hipLaunchKernelGGL(( cpyparams2grouped_params_for_comparison_indexing), dim3((w_num)/(block_size/32)), dim3(block_size), sizeof(unsigned int)*(mf_info->params.item_group_num+1), 0, sgd_info->d_q, (__half**)sgd_info->d_item_group_ptr, mf_info->d_item_group_end_idx, d_item_group_idx, mf_info->d_item2sorted_idx, mf_info->params.k, mf_info->max_item, mf_info->params.item_group_num);
}else if (mf_info->params.k == 64){
hipLaunchKernelGGL(( cpyparams2grouped_params_for_comparison_indexing_k64), dim3((w_num)/(block_size/32)), dim3(block_size), sizeof(unsigned int)*(mf_info->params.user_group_num+1), 0, sgd_info->d_p, (__half**)sgd_info->d_user_group_ptr, mf_info->d_user_group_end_idx, d_user_group_idx, mf_info->d_user2sorted_idx, mf_info->params.k, mf_info->max_user, mf_info->params.user_group_num);
hipLaunchKernelGGL(( cpyparams2grouped_params_for_comparison_indexing_k64), dim3((w_num)/(block_size/32)), dim3(block_size), sizeof(unsigned int)*(mf_info->params.item_group_num+1), 0, sgd_info->d_q, (__half**)sgd_info->d_item_group_ptr, mf_info->d_item_group_end_idx, d_item_group_idx, mf_info->d_item2sorted_idx, mf_info->params.k, mf_info->max_item, mf_info->params.item_group_num);
}
hipDeviceSynchronize();
gpuErr(hipPeekAtLastError());
hipFree(d_user_group_idx);
hipFree(d_item_group_idx);
hipFree(mf_info->d_user2sorted_idx);
hipFree(mf_info->d_item2sorted_idx);
}
void transform_feature_vector_half2float(short *half_feature, float *float_feature, unsigned int dim, unsigned int k){
float *gpu_float_feature;
half *gpu_half_feature;
hipMalloc(&gpu_half_feature, sizeof(half)*dim*k);
hipMalloc(&gpu_float_feature, sizeof(float)*dim*k);
gpuErr(hipPeekAtLastError());
hipMemcpy(gpu_half_feature, half_feature, sizeof(half)*dim*k, hipMemcpyHostToDevice);
gpuErr(hipPeekAtLastError());
hipLaunchKernelGGL(( transform_half2float), dim3((dim*k+255)/256), dim3(256), 0, 0, gpu_float_feature, gpu_half_feature, dim*k);
hipDeviceSynchronize();
gpuErr(hipPeekAtLastError());
hipMemcpy(float_feature, gpu_float_feature, sizeof(float)*dim*k, hipMemcpyDeviceToHost);
gpuErr(hipPeekAtLastError());
hipFree(gpu_float_feature);
hipFree(gpu_half_feature);
gpuErr(hipPeekAtLastError());
}
void conversion_features_half(short *feature_vec, float *feature_vec_from ,unsigned int dim, unsigned int k){
__half* gpu_vec;
float* gpu_from_vec;
hipMalloc(&gpu_vec, sizeof(__half) * dim * k);
hipMalloc(&gpu_from_vec, sizeof(float) * dim * k);
hipMemcpy(gpu_from_vec, feature_vec_from, sizeof(float) * dim * k, hipMemcpyHostToDevice);
gpuErr(hipPeekAtLastError());
hipLaunchKernelGGL(( transform_float2half), dim3((dim * k + 255) / 256), dim3(256), 0, 0, gpu_vec, gpu_from_vec, dim * k);
hipDeviceSynchronize();
gpuErr(hipPeekAtLastError());
hipMemcpy(feature_vec, gpu_vec, sizeof(__half) * dim * k, hipMemcpyDeviceToHost);
hipFree(gpu_vec);
hipFree(gpu_from_vec);
}
void transition_params_half2float(Mf_info *mf_info, SGD *sgd_info){
int num_groups = 10000;
hipLaunchKernelGGL(( mem_cpy_fp16tofp32), dim3(num_groups), dim3(512), 0, 0, sgd_info->d_p, sgd_info->d_half_p, mf_info->params.k * mf_info->max_user);
hipLaunchKernelGGL(( mem_cpy_fp16tofp32), dim3(num_groups), dim3(512), 0, 0, sgd_info->d_q, sgd_info->d_half_q, mf_info->params.k * mf_info->max_item);
hipDeviceSynchronize();
gpuErr(hipPeekAtLastError());
} | ffdf2cf121e177468f22539d625c5a7255eb983b.cu | #include <cuda_fp16.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <chrono>
#include "common_struct.h"
#include "common.h"
#include "model_init.h"
#include <iostream>
using namespace std;
__global__ void init_rand_state(curandState*state, int size)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < size)curand_init(clock() + tid, tid, 0, &state[tid]);
// if(tid < size)curand_init(1, tid, 0, &state[tid]);
}
__global__ void init_rand_feature_single(curandState*state, unsigned int state_size, float* array , unsigned int dim, unsigned int k)
{
unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int number_threads = gridDim.x*blockDim.x;
if (state_size > tid){
for (unsigned int i = tid; i < dim * k; i+= state_size){
array[i] = (float)((curand_normal_double(&state[tid])* 0.01)) ;
}
}
}
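// One warp per entity: converts the entity's parameters from float to half and writes them into
// its group's buffer at the slot given by the sorted index
// (this variant assumes k == 128; the _k64 variant below handles k == 64).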
__global__ void cpyparams2grouped_params_for_comparison_indexing(float* original_params, __half** converted_params, unsigned int *group_end_idx, unsigned int *entity2group, unsigned int *entity2sorted_idx, unsigned int k, unsigned int n, unsigned int group_num){
extern __shared__ unsigned int end_idx_s[];
for (int i = threadIdx.x; i < group_num; i+= blockDim.x){
end_idx_s[i+1] = group_end_idx[i];
}
if (threadIdx.x == 0){
end_idx_s[0] = -1;
}
__syncthreads();
unsigned int g_wid = (blockIdx.x*blockDim.x + threadIdx.x)/32;
unsigned int num_w = gridDim.x*blockDim.x/32;
unsigned int lane_id = threadIdx.x%32;
for (; g_wid < n; g_wid += num_w){
unsigned int group_idx = entity2group[g_wid];
unsigned int base_idx = (entity2sorted_idx[g_wid]- (end_idx_s[group_idx] + 1)) * k;
((__half*)converted_params[group_idx])[base_idx + lane_id] = __float2half_rn(original_params[g_wid * k + lane_id]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 32] = __float2half_rn(original_params[g_wid * k + lane_id + 32]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 64] = __float2half_rn(original_params[g_wid * k + lane_id + 64]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 96] = __float2half_rn(original_params[g_wid * k + lane_id + 96]);
}
}
__global__ void cpyparams2grouped_params_for_comparison_indexing_k64(float* original_params, __half** converted_params, unsigned int *group_end_idx, unsigned int *entity2group, unsigned int *entity2sorted_idx, unsigned int k, unsigned int n, unsigned int group_num){
extern __shared__ unsigned int end_idx_s[];
// if (threadIdx.x < group_num){ end_idx_s[threadIdx.x+1] = group_end_idx[threadIdx.x];}
for (int i = threadIdx.x; i < group_num; i+= blockDim.x){
end_idx_s[i+1] = group_end_idx[i];
}
if (threadIdx.x == 0){
end_idx_s[0] = -1;
}
__syncthreads();
unsigned int g_wid = (blockIdx.x*blockDim.x + threadIdx.x)/32;
unsigned int num_w = gridDim.x*blockDim.x/32;
unsigned int lane_id = threadIdx.x%32;
for (; g_wid < n; g_wid += num_w){
unsigned int group_idx = entity2group[g_wid];
unsigned int base_idx = (entity2sorted_idx[g_wid]- (end_idx_s[group_idx] + 1)) * k;
((__half*)converted_params[group_idx])[base_idx + lane_id] = __float2half_rn(original_params[g_wid * k + lane_id]);
((__half*)converted_params[group_idx])[base_idx + lane_id + 32] = __float2half_rn(original_params[g_wid * k + lane_id + 32]);
}
}
__global__ void transform_half2float(float *gpu_float_feature, half *gpu_half_feature, unsigned int vec_size)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
int number_threads = gridDim.x*blockDim.x;
for (unsigned int i = tid; i < vec_size; i += number_threads){
if (i < vec_size)
gpu_float_feature[i] = __half2float(gpu_half_feature[i]);
}
}
__global__ void transform_float2half(__half* half_feature, float *gpu_float_feature, unsigned int vec_size){
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
int number_threads = gridDim.x*blockDim.x;
for (unsigned int i = tid; i < vec_size; i += number_threads){
if (i < vec_size){
half_feature[i] = __float2half_rn(gpu_float_feature[i]);
}
}
}
__global__ void mem_cpy_fp16tofp32(float* out, __half* in, int n){
int i = threadIdx.x + blockIdx.x * blockDim.x;
for (; i < n; i += gridDim.x * blockDim.x)
out[i] = __half2float(in[i]);
}
void init_features_single(float *feature_vec, unsigned int dim, unsigned int k){
float* gpu_vec;
cudaMalloc(&gpu_vec, sizeof(float) * dim * k);
unsigned int workers = 3200;
curandState* d_state;
int state_size = workers * 32;
cudaMalloc(&d_state, sizeof(curandState) * state_size);
init_rand_state<<<(state_size + 255)/256, 256>>>(d_state, state_size);
cudaDeviceSynchronize();
gpuErr(cudaPeekAtLastError());
init_rand_feature_single<<<(state_size + 255)/256, 256>>>(d_state, state_size, gpu_vec, dim, k);
cudaDeviceSynchronize();
cudaMemcpy(feature_vec, gpu_vec, sizeof(float)*dim*k, cudaMemcpyDeviceToHost);
gpuErr(cudaPeekAtLastError());
cudaFree(d_state);
cudaFree(gpu_vec);
}
void init_model_single(Mf_info *mf_info, SGD *sgd_info){
cudaMallocHost(&sgd_info->p, sizeof(float) * mf_info->max_user * mf_info->params.k);
cudaMallocHost(&sgd_info->q, sizeof(float) * mf_info->max_item * mf_info->params.k);
gpuErr(cudaPeekAtLastError());
init_features_single(sgd_info->p, mf_info->max_user, mf_info->params.k);
init_features_single(sgd_info->q, mf_info->max_item, mf_info->params.k);
cudaMalloc(&sgd_info->d_p, sizeof(float) * mf_info->max_user * mf_info->params.k);
cudaMalloc(&sgd_info->d_q, sizeof(float) * mf_info->max_item * mf_info->params.k);
cudaMemcpy(sgd_info->d_p, sgd_info->p, sizeof(float) * mf_info->max_user * mf_info->params.k, cudaMemcpyHostToDevice);
cudaMemcpy(sgd_info->d_q, sgd_info->q, sizeof(float) * mf_info->max_item * mf_info->params.k, cudaMemcpyHostToDevice);
}
void init_model_half(Mf_info *mf_info, SGD *sgd_info){
cudaMallocHost(&sgd_info->p, sizeof(float) * mf_info->max_user * mf_info->params.k);
cudaMallocHost(&sgd_info->q, sizeof(float) * mf_info->max_item * mf_info->params.k);
gpuErr(cudaPeekAtLastError());
cudaMallocHost(&sgd_info->half_p, sizeof(short) * mf_info->max_user * mf_info->params.k);
cudaMallocHost(&sgd_info->half_q, sizeof(short) * mf_info->max_item * mf_info->params.k);
gpuErr(cudaPeekAtLastError());
init_features_single(sgd_info->p, mf_info->max_user, mf_info->params.k);
init_features_single(sgd_info->q, mf_info->max_item, mf_info->params.k);
conversion_features_half(sgd_info->half_p, sgd_info->p ,mf_info->max_user, mf_info->params.k);
conversion_features_half(sgd_info->half_q, sgd_info->q ,mf_info->max_item, mf_info->params.k);
cudaMalloc(&sgd_info->d_half_p, sizeof(short) * mf_info->max_user * mf_info->params.k);
cudaMalloc(&sgd_info->d_half_q, sizeof(short) * mf_info->max_item * mf_info->params.k);
cudaMemcpy(sgd_info->d_half_p, sgd_info->half_p, sizeof(short) * mf_info->max_user * mf_info->params.k, cudaMemcpyHostToDevice);
cudaMemcpy(sgd_info->d_half_q, sgd_info->half_q, sizeof(short) * mf_info->max_item * mf_info->params.k, cudaMemcpyHostToDevice);
}
void cpy2grouped_parameters_gpu_for_comparison_indexing(Mf_info *mf_info, SGD *sgd_info){
double cpy2grouped_parameters_exec_time = 0;
unsigned int* d_user_group_idx;
unsigned int* d_item_group_idx;
cudaMalloc(&d_user_group_idx, sizeof(unsigned int) * mf_info->max_user);
cudaMalloc(&d_item_group_idx, sizeof(unsigned int) * mf_info->max_item);
cudaMemcpy(d_user_group_idx, mf_info->user_group_idx, sizeof(unsigned int) * mf_info->max_user, cudaMemcpyHostToDevice);
cudaMemcpy(d_item_group_idx, mf_info->item_group_idx, sizeof(unsigned int) * mf_info->max_item, cudaMemcpyHostToDevice);
for (int i = 0; i < mf_info->params.user_group_num; i++){
unsigned int group_params_size = mf_info->user_group_size[i] * mf_info->params.k;
cudaMalloc((void**)&sgd_info->user_group_d_ptr[i], sizeof(__half) * group_params_size);
cudaMallocHost(&sgd_info->user_group_ptr[i], sizeof(__half)*group_params_size);
}
for (int i = 0; i < mf_info->params.item_group_num; i++){
unsigned int group_params_size = mf_info->item_group_size[i] * mf_info->params.k;
cudaMalloc((void**)&sgd_info->item_group_d_ptr[i], sizeof(__half) * group_params_size);
cudaMallocHost(&sgd_info->item_group_ptr[i], sizeof(__half)*group_params_size);
}
cudaMemcpy(sgd_info->d_user_group_ptr, sgd_info->user_group_d_ptr, sizeof(void**) * mf_info->params.user_group_num, cudaMemcpyHostToDevice);
cudaMemcpy(sgd_info->d_item_group_ptr, sgd_info->item_group_d_ptr, sizeof(void**) * mf_info->params.item_group_num, cudaMemcpyHostToDevice);
gpuErr(cudaPeekAtLastError());
unsigned int w_num = 2048;
unsigned int block_size = 256;
if (mf_info->params.k == 128){
cpyparams2grouped_params_for_comparison_indexing<<<(w_num)/(block_size/32), block_size, sizeof(unsigned int)*(mf_info->params.user_group_num+1)>>>(sgd_info->d_p, (__half**)sgd_info->d_user_group_ptr, mf_info->d_user_group_end_idx, d_user_group_idx, mf_info->d_user2sorted_idx, mf_info->params.k, mf_info->max_user, mf_info->params.user_group_num);
cpyparams2grouped_params_for_comparison_indexing<<<(w_num)/(block_size/32), block_size, sizeof(unsigned int)*(mf_info->params.item_group_num+1)>>>(sgd_info->d_q, (__half**)sgd_info->d_item_group_ptr, mf_info->d_item_group_end_idx, d_item_group_idx, mf_info->d_item2sorted_idx, mf_info->params.k, mf_info->max_item, mf_info->params.item_group_num);
}else if (mf_info->params.k == 64){
cpyparams2grouped_params_for_comparison_indexing_k64<<<(w_num)/(block_size/32), block_size, sizeof(unsigned int)*(mf_info->params.user_group_num+1)>>>(sgd_info->d_p, (__half**)sgd_info->d_user_group_ptr, mf_info->d_user_group_end_idx, d_user_group_idx, mf_info->d_user2sorted_idx, mf_info->params.k, mf_info->max_user, mf_info->params.user_group_num);
cpyparams2grouped_params_for_comparison_indexing_k64<<<(w_num)/(block_size/32), block_size, sizeof(unsigned int)*(mf_info->params.item_group_num+1)>>>(sgd_info->d_q, (__half**)sgd_info->d_item_group_ptr, mf_info->d_item_group_end_idx, d_item_group_idx, mf_info->d_item2sorted_idx, mf_info->params.k, mf_info->max_item, mf_info->params.item_group_num);
}
cudaDeviceSynchronize();
gpuErr(cudaPeekAtLastError());
cudaFree(d_user_group_idx);
cudaFree(d_item_group_idx);
cudaFree(mf_info->d_user2sorted_idx);
cudaFree(mf_info->d_item2sorted_idx);
}
void transform_feature_vector_half2float(short *half_feature, float *float_feature, unsigned int dim, unsigned int k){
float *gpu_float_feature;
half *gpu_half_feature;
cudaMalloc(&gpu_half_feature, sizeof(half)*dim*k);
cudaMalloc(&gpu_float_feature, sizeof(float)*dim*k);
gpuErr(cudaPeekAtLastError());
cudaMemcpy(gpu_half_feature, half_feature, sizeof(half)*dim*k, cudaMemcpyHostToDevice);
gpuErr(cudaPeekAtLastError());
transform_half2float<<<(dim*k+255)/256, 256>>>(gpu_float_feature, gpu_half_feature, dim*k);
cudaDeviceSynchronize();
gpuErr(cudaPeekAtLastError());
cudaMemcpy(float_feature, gpu_float_feature, sizeof(float)*dim*k, cudaMemcpyDeviceToHost);
gpuErr(cudaPeekAtLastError());
cudaFree(gpu_float_feature);
cudaFree(gpu_half_feature);
gpuErr(cudaPeekAtLastError());
}
void conversion_features_half(short *feature_vec, float *feature_vec_from ,unsigned int dim, unsigned int k){
__half* gpu_vec;
float* gpu_from_vec;
cudaMalloc(&gpu_vec, sizeof(__half) * dim * k);
cudaMalloc(&gpu_from_vec, sizeof(float) * dim * k);
cudaMemcpy(gpu_from_vec, feature_vec_from, sizeof(float) * dim * k, cudaMemcpyHostToDevice);
gpuErr(cudaPeekAtLastError());
transform_float2half<<< (dim * k + 255) / 256, 256>>>(gpu_vec, gpu_from_vec, dim * k);
cudaDeviceSynchronize();
gpuErr(cudaPeekAtLastError());
cudaMemcpy(feature_vec, gpu_vec, sizeof(__half) * dim * k, cudaMemcpyDeviceToHost);
cudaFree(gpu_vec);
cudaFree(gpu_from_vec);
}
void transition_params_half2float(Mf_info *mf_info, SGD *sgd_info){
int num_groups = 10000;
mem_cpy_fp16tofp32<<<num_groups, 512>>>(sgd_info->d_p, sgd_info->d_half_p, mf_info->params.k * mf_info->max_user);
mem_cpy_fp16tofp32<<<num_groups, 512>>>(sgd_info->d_q, sgd_info->d_half_q, mf_info->params.k * mf_info->max_item);
cudaDeviceSynchronize();
gpuErr(cudaPeekAtLastError());
} |
2a5dbcc41c32f8eaaa7fce9be7f8c43285942d77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
// includes, kernels
#include "vector_reduction_kernel.hip"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
float* read_array(const char* filename, int len) {
float *x = (float*) malloc(len * sizeof(float));
FILE *fp = fopen(filename, "r");
for (int i = 0; i < len; i++) {
fscanf(fp, "%f", &x[i]);
}
fclose(fp);
return x;
}
float computeOnDevice(float* h_data, int num_elements);
extern "C" void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
int num_elements = NUM_ELEMENTS;
float* h_data=read_array("problem2.inp",num_elements);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Read the input data array from the given file.
// compute reference solution
float reference = 1.0f;
computeGold(&reference , h_data, num_elements);
// **===-------- Modify the body of this function -----------===**
float result = computeOnDevice(h_data, num_elements);
// **===-----------------------------------------------------------===**
// Run accuracy test
float epsilon = 0.0001f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
if(!result_regtest)printf("Test failed device: %f host: %f\n",result,reference);//This shouldn't print in the working case
printf("%f\n",result);
// cleanup memory
free( h_data);
return 0;
}
// **===----------------- Modify this function ---------------------===**
// Takes h_data from the host, copies it to the device, sets up grid and
// thread dimensions, executes the reduction kernel, and copies the result
// back to h_data.
// Note: float* h_data is both the input and the output of this function.
float computeOnDevice(float* h_data, int num_elements)
{
//Allocate memory on the device
float *d_data;
hipMalloc((void**) &d_data, sizeof(float)*num_elements);
hipMemcpy(d_data, h_data, sizeof(float)*num_elements, hipMemcpyHostToDevice);
//launch the kernel
int blockSize = 1, threadSize = num_elements/2;
int element_size = num_elements;
//launch a single block with num_elements/2 threads (assumes the whole array fits in one block)
hipLaunchKernelGGL(( reduction) , dim3(blockSize), dim3(threadSize), 0, 0, d_data,element_size);
//Copy only the reduced result (d_data[0]) back to the host and free device memory
hipMemcpy(h_data,d_data,sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_data);
return h_data[0];
}
| 2a5dbcc41c32f8eaaa7fce9be7f8c43285942d77.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
// includes, kernels
#include "vector_reduction_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
float* read_array(const char* filename, int len) {
float *x = (float*) malloc(len * sizeof(float));
FILE *fp = fopen(filename, "r");
for (int i = 0; i < len; i++) {
fscanf(fp, "%f", &x[i]);
}
fclose(fp);
return x;
}
float computeOnDevice(float* h_data, int num_elements);
extern "C" void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
int num_elements = NUM_ELEMENTS;
float* h_data=read_array("problem2.inp",num_elements);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Read the input data array from the given file.
// compute reference solution
float reference = 1.0f;
computeGold(&reference , h_data, num_elements);
// **===-------- Modify the body of this function -----------===**
float result = computeOnDevice(h_data, num_elements);
// **===-----------------------------------------------------------===**
// Run accuracy test
float epsilon = 0.0001f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
if(!result_regtest)printf("Test failed device: %f host: %f\n",result,reference);//This shouldn't print in the working case
printf("%f\n",result);
// cleanup memory
free( h_data);
return 0;
}
// **===----------------- Modify this function ---------------------===**
// Takes h_data from the host, copies it to the device, sets up grid and
// thread dimensions, executes the reduction kernel, and copies the result
// back to h_data.
// Note: float* h_data is both the input and the output of this function.
float computeOnDevice(float* h_data, int num_elements)
{
//Allocate memory on the device
float *d_data;
cudaMalloc((void**) &d_data, sizeof(float)*num_elements);
cudaMemcpy(d_data, h_data, sizeof(float)*num_elements, cudaMemcpyHostToDevice);
//launch the kernel
int blockSize = 1, threadSize = num_elements/2;
int element_size = num_elements;
//launch the kernel with half of the thread size
reduction <<<blockSize, threadSize>>>(d_data,element_size);
//Copy the data back to host
cudaMemcpy(h_data,d_data,sizeof(float), cudaMemcpyDeviceToHost);
return h_data[0];
}
|
afd5b42b737a5cd908dc9de9eeb0bb86d6b716c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in,
const int_tp num, const int_tp channels,
const int_tp height, const int_tp width,
const int_tp size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int_tp w = index % width;
const int_tp h = (index / width) % height;
const int_tp n = index / width / height;
const int_tp offset = (n * channels * height + h) * width + w;
const int_tp step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int_tp head = 0;
const int_tp pre_pad = (size - 1) / 2;
const int_tp post_pad = size - pre_pad - 1;
Dtype accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelForward(bottom, top);
break;
default:
LOG(FATAL)<< "Unknown normalization region.";
}
}
// TODO: check if it would be faster to just put it into the previous kernel.
#ifdef USE_ROCM
template<typename Dtype>
__global__ void LRNComputeOutput(const int_tp nthreads, const Dtype* const in,
const Dtype* const scale,
const Dtype negative_beta, Dtype* const out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
#endif // USE_ROCM
template<typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, compute scale
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int_tp n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS)(
n_threads, bottom_data, num_, channels_, height_,
width_, size_,
alpha_ / size_, k_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS)(
n_threads, bottom_data, scale_data, -beta_, top_data);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
int_tp n_threads = num_ * height_ * width_;
cl_uint argIdx = 0;
size_t global_work_size_[1] = {(size_t)n_threads};
if (this->phase_ == caffe::TRAIN) {
viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel(
CL_KERNEL_SELECT("lrn_full"));
oclk_lrn_fill.arg(argIdx++, n_threads);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx));
oclk_lrn_fill.arg(argIdx++, num_);
oclk_lrn_fill.arg(argIdx++, channels_);
oclk_lrn_fill.arg(argIdx++, height_);
oclk_lrn_fill.arg(argIdx++, width_);
oclk_lrn_fill.arg(argIdx++, size_);
oclk_lrn_fill.arg(argIdx++, alpha_ / size_);
oclk_lrn_fill.arg(argIdx++, k_);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) scale_data, &ctx));
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx));
oclk_lrn_fill.arg(argIdx++, -beta_);
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_lrn_fill.handle().get(), 1, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
} else {
viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel(
CL_KERNEL_SELECT("lrn_full_no_scale"));
cl_uint argIdx = 0;
oclk_lrn_fill.arg(argIdx++, n_threads);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx));
oclk_lrn_fill.arg(argIdx++, num_);
oclk_lrn_fill.arg(argIdx++, channels_);
oclk_lrn_fill.arg(argIdx++, height_);
oclk_lrn_fill.arg(argIdx++, width_);
oclk_lrn_fill.arg(argIdx++, size_);
oclk_lrn_fill.arg(argIdx++, alpha_ / size_);
oclk_lrn_fill.arg(argIdx++, k_);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx));
oclk_lrn_fill.arg(argIdx++, -beta_);
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_lrn_fill.handle().get(), 1, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
}
#endif // USE_GREENTEA
}
}
template void LRNLayer<float>::CrossChannelForward_gpu(
const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top);
template void LRNLayer<double>::CrossChannelForward_gpu(
const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top);
template<typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelBackward(top, propagate_down, bottom);
break;
default:
LOG(FATAL)<< "Unknown normalization region.";
}
}
#ifdef USE_ROCM
template<typename Dtype>
__global__ void LRNComputeDiff(const int_tp nthreads,
const Dtype* const bottom_data,
const Dtype* const top_data,
const Dtype* const scale,
const Dtype* const top_diff, const int_tp num,
const int_tp channels, const int_tp height,
const int_tp width, const int_tp size,
const Dtype negative_beta,
const Dtype cache_ratio,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int_tp w = index % width;
const int_tp h = (index / width) % height;
const int_tp n = index / width / height;
const int_tp offset = (n * channels * height + h) * width + w;
const int_tp step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int_tp head = 0;
const int_tp pre_pad = size - (size + 1) / 2;
const int_tp post_pad = size - pre_pad - 1;
Dtype accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step]
/ scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step]
/ scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step]
* top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)
* step] * pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step]
* top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)
* step] * pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int_tp n_threads = num_ * height_ * width_;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS)(
n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
scale_.gpu_data(), top[0]->gpu_diff(), num_,
channels_, height_, width_,
size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
bottom[0]->mutable_gpu_diff());
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_lrn = program.get_kernel(
CL_KERNEL_SELECT("lrn_compute_diff"));
viennacl::ocl::enqueue(
oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx),
WrapHandle((cl_mem) (top[0]->gpu_data()), &ctx),
WrapHandle((cl_mem) (scale_.gpu_data()), &ctx),
WrapHandle((cl_mem) (top[0]->gpu_diff()), &ctx), num_,
channels_, height_, width_, size_, -beta_,
Dtype(2. * alpha_ * beta_ / size_),
WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template void LRNLayer<float>::CrossChannelBackward_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom);
template void LRNLayer<double>::CrossChannelBackward_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom);
INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer);
} // namespace caffe
| afd5b42b737a5cd908dc9de9eeb0bb86d6b716c7.cu | #include <vector>
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void LRNFillScale(const int_tp nthreads, const Dtype* const in,
const int_tp num, const int_tp channels,
const int_tp height, const int_tp width,
const int_tp size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int_tp w = index % width;
const int_tp h = (index / width) % height;
const int_tp n = index / width / height;
const int_tp offset = (n * channels * height + h) * width + w;
const int_tp step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int_tp head = 0;
const int_tp pre_pad = (size - 1) / 2;
const int_tp post_pad = size - pre_pad - 1;
Dtype accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelForward(bottom, top);
break;
default:
LOG(FATAL)<< "Unknown normalization region.";
}
}
// TODO: check if it would be faster to just put it into the previous kernel.
#ifdef USE_CUDA
template<typename Dtype>
__global__ void LRNComputeOutput(const int_tp nthreads, const Dtype* const in,
const Dtype* const scale,
const Dtype negative_beta, Dtype* const out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
#endif // USE_CUDA
template<typename Dtype>
void LRNLayer<Dtype>::CrossChannelForward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// First, compute scale
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int_tp n_threads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
LRNFillScale CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS)(
n_threads, bottom_data, num_, channels_, height_,
width_, size_,
alpha_ / size_, k_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeOutput CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS)(
n_threads, bottom_data, scale_data, -beta_, top_data);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
int_tp n_threads = num_ * height_ * width_;
cl_uint argIdx = 0;
size_t global_work_size_[1] = {(size_t)n_threads};
if (this->phase_ == caffe::TRAIN) {
viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel(
CL_KERNEL_SELECT("lrn_full"));
oclk_lrn_fill.arg(argIdx++, n_threads);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx));
oclk_lrn_fill.arg(argIdx++, num_);
oclk_lrn_fill.arg(argIdx++, channels_);
oclk_lrn_fill.arg(argIdx++, height_);
oclk_lrn_fill.arg(argIdx++, width_);
oclk_lrn_fill.arg(argIdx++, size_);
oclk_lrn_fill.arg(argIdx++, alpha_ / size_);
oclk_lrn_fill.arg(argIdx++, k_);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) scale_data, &ctx));
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx));
oclk_lrn_fill.arg(argIdx++, -beta_);
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_lrn_fill.handle().get(), 1, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
} else {
viennacl::ocl::kernel &oclk_lrn_fill = program.get_kernel(
CL_KERNEL_SELECT("lrn_full_no_scale"));
cl_uint argIdx = 0;
oclk_lrn_fill.arg(argIdx++, n_threads);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) bottom_data, &ctx));
oclk_lrn_fill.arg(argIdx++, num_);
oclk_lrn_fill.arg(argIdx++, channels_);
oclk_lrn_fill.arg(argIdx++, height_);
oclk_lrn_fill.arg(argIdx++, width_);
oclk_lrn_fill.arg(argIdx++, size_);
oclk_lrn_fill.arg(argIdx++, alpha_ / size_);
oclk_lrn_fill.arg(argIdx++, k_);
oclk_lrn_fill.arg(argIdx++, WrapHandle((cl_mem) top_data, &ctx));
oclk_lrn_fill.arg(argIdx++, -beta_);
OCL_CHECK(clEnqueueNDRangeKernel(ctx.get_queue().handle().get(),
oclk_lrn_fill.handle().get(), 1, NULL,
global_work_size_, NULL, 0, NULL,
NULL));
}
#endif // USE_GREENTEA
}
}
template void LRNLayer<float>::CrossChannelForward_gpu(
const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top);
template void LRNLayer<double>::CrossChannelForward_gpu(
const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top);
template<typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelBackward(top, propagate_down, bottom);
break;
default:
LOG(FATAL)<< "Unknown normalization region.";
}
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void LRNComputeDiff(const int_tp nthreads,
const Dtype* const bottom_data,
const Dtype* const top_data,
const Dtype* const scale,
const Dtype* const top_diff, const int_tp num,
const int_tp channels, const int_tp height,
const int_tp width, const int_tp size,
const Dtype negative_beta,
const Dtype cache_ratio,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int_tp w = index % width;
const int_tp h = (index / width) % height;
const int_tp n = index / width / height;
const int_tp offset = (n * channels * height + h) * width + w;
const int_tp step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int_tp head = 0;
const int_tp pre_pad = size - (size + 1) / 2;
const int_tp post_pad = size - pre_pad - 1;
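// slide a window of `size` channels; accum_ratio keeps the running sum of top_diff * top / scale inside the window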
Dtype accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step]
/ scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step]
/ scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step]
* top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)
* step] * pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step]
* top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad)
* step] * pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int_tp n_threads = num_ * height_ * width_;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeDiff CUDA_KERNEL(CAFFE_GET_BLOCKS(n_threads),
CAFFE_CUDA_NUM_THREADS)(
n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(),
scale_.gpu_data(), top[0]->gpu_diff(), num_,
channels_, height_, width_,
size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
bottom[0]->mutable_gpu_diff());
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_lrn = program.get_kernel(
CL_KERNEL_SELECT("lrn_compute_diff"));
viennacl::ocl::enqueue(
oclk_lrn(n_threads, WrapHandle((cl_mem) (bottom[0]->gpu_data()), &ctx),
WrapHandle((cl_mem) (top[0]->gpu_data()), &ctx),
WrapHandle((cl_mem) (scale_.gpu_data()), &ctx),
WrapHandle((cl_mem) (top[0]->gpu_diff()), &ctx), num_,
channels_, height_, width_, size_, -beta_,
Dtype(2. * alpha_ * beta_ / size_),
WrapHandle((cl_mem) (bottom[0]->mutable_gpu_diff()), &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
template void LRNLayer<float>::CrossChannelBackward_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom);
template void LRNLayer<double>::CrossChannelBackward_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom);
INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer);
} // namespace caffe
|
580d81c3d76662c6607028b27a551bc2912a1b30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, double a, double *x, double *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
#define BLOCK_SIZE 64
int main(int argc, const char * argv[])
{
if (argc != 2) {
fprintf(stderr, "usage: %s <n>\n", argv[0]);
return 1;
}
int N = atoi(argv[1]);
double *x, *y, *d_x, *d_y;
x = (double*)malloc(N*sizeof(double));
y = (double*)malloc(N*sizeof(double));
hipMalloc(&d_x, N*sizeof(double));
hipMalloc(&d_y, N*sizeof(double));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
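// time only the host-to-device copy of x; the final printf reports that copy's effective bandwidth in GB/s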
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMemcpy(d_x, x, N*sizeof(double), hipMemcpyHostToDevice);
hipEventRecord(stop);
hipMemcpy(d_y, y, N*sizeof(double), hipMemcpyHostToDevice);
// Perform SAXPY on the N elements
hipLaunchKernelGGL(( saxpy), dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(double), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
double maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = max(maxError, abs(y[i]-4.0f));
}
if (maxError > 0)
printf("Max error: %f\n", maxError);
printf("%f\n", N*sizeof(double)/milliseconds/1e6);
}
| 580d81c3d76662c6607028b27a551bc2912a1b30.cu | #include <stdio.h>
__global__
void saxpy(int n, double a, double *x, double *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
#define BLOCK_SIZE 64
int main(int argc, const char * argv[])
{
if (argc != 2) {
fprintf(stderr, "usage: %s <n>\n", argv[0]);
return 1;
}
int N = atoi(argv[1]);
double *x, *y, *d_x, *d_y;
x = (double*)malloc(N*sizeof(double));
y = (double*)malloc(N*sizeof(double));
cudaMalloc(&d_x, N*sizeof(double));
cudaMalloc(&d_y, N*sizeof(double));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
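// time only the host-to-device copy of x; the final printf reports that copy's effective bandwidth in GB/s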
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMemcpy(d_x, x, N*sizeof(double), cudaMemcpyHostToDevice);
cudaEventRecord(stop);
cudaMemcpy(d_y, y, N*sizeof(double), cudaMemcpyHostToDevice);
// Perform SAXPY on the N elements
saxpy<<<(N+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(double), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
double maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = max(maxError, abs(y[i]-4.0f));
}
if (maxError > 0)
printf("Max error: %f\n", maxError);
printf("%f\n", N*sizeof(double)/milliseconds/1e6);
}
|
14b53efe50e13b88c253cdd8ca121b819eed60aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* random3.cu */
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define nBlocks 2
#define nThreads 3
#define N (nBlocks*nThreads)
#define MAX 100
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, unsigned long long *seeds,
unsigned long long *sequences, hiprandState_t* states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
seeds[idx] = seed;
sequences[idx] = blockIdx.x;
/* we have to initialize the state */
hiprand_init(seeds[idx], /* the seed can be the same for each core; here a fixed seed is passed in from the CPU */
sequences[idx], /* the sequence number should differ between generators that must be independent;
here each block gets its own sequence, so all threads within a block share one stream */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[idx]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
__global__ void randoms(hiprandState_t* states, unsigned int* numbers) {
/* hiprand works like rand - except that it takes a state as a parameter */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
numbers[idx] = hiprand(&(states[idx])) % MAX;
}
int main(int argc, char* argv[]) {
int nRandomNumbers;
if (argc > 1) {
nRandomNumbers = atoi(argv[1]);
} else {
nRandomNumbers = 20;
}
/* CUDA's random number library uses hiprandState_t to keep track of the seed value
we will store a random state for every thread */
hiprandState_t* states;
unsigned long long *seeds;
unsigned long long *sequences;
hiprandState_t* h_states;
unsigned long long *h_seeds;
unsigned long long *h_sequences;
/* allocate space on the GPU for the random states */
hipMalloc((void**) &seeds, N * sizeof(unsigned long long));
hipMalloc((void**) &sequences, N * sizeof(unsigned long long));
hipMalloc((void**) &states, N * sizeof(hiprandState_t));
/* invoke the GPU to initialize all of the random states */
hipLaunchKernelGGL(( init), dim3(nBlocks), dim3(nThreads), 0, 0, 0, seeds, sequences, states);
h_seeds = (unsigned long long *)malloc(N * sizeof(unsigned long long));
h_sequences = (unsigned long long *)malloc(N * sizeof(unsigned long long));
h_states = (hiprandState_t *)malloc(N * sizeof(hiprandState_t));
hipMemcpy(h_seeds, seeds, N * sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipMemcpy(h_sequences, sequences, N * sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipMemcpy(h_states, states, N * sizeof(hiprandState_t), hipMemcpyDeviceToHost);
printf("seed, sequence, hiprandState_t\n");
for (int i = 0; i < nBlocks; i++) {
printf("Block [%d]\n", i);
for (int j = 0; j < nThreads; j++) {
int idx = i * nThreads + j;
printf("\tThread [%u] (%llu, %llu), %u - ", j, h_seeds[idx], h_sequences[idx], h_states[idx].d);
for (int k = 0; k < 5; k++) {
printf("%d, ", h_states[idx].v[k]);
}
printf("\n");
}
printf("\n");
}
/* allocate an array of unsigned ints on the CPU and GPU */
unsigned int* cpu_nums;
unsigned int* gpu_nums;
cpu_nums = (unsigned int*)malloc(nRandomNumbers * N * sizeof(unsigned int));
hipMalloc((void**) &gpu_nums, nRandomNumbers * N * sizeof(unsigned int));
hipDeviceSynchronize();
/* invoke the kernel to get some random numbers */
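/* each launch writes one draw per thread into the next N-element slice of gpu_nums */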
for (int i = 0; i < nRandomNumbers; i++)
hipLaunchKernelGGL(( randoms), dim3(nBlocks), dim3(nThreads), 0, 0, states, gpu_nums + N*i);
hipError_t err = hipGetLastError();
if (err) {
printf("ERROR... %s\n", hipGetErrorString(err));
}
/* copy the random numbers back */
hipMemcpy(cpu_nums, gpu_nums, nRandomNumbers * N * sizeof(unsigned int), hipMemcpyDeviceToHost);
/* print them out */
for (int i = 0; i < nBlocks; i++) {
printf("Block [%d]\n", i);
for (int j = 0; j < nThreads; j++) {
printf("\tThread [%d]: ", j);
for (int k = 0; k < nRandomNumbers; k++) {
printf("%u ", (cpu_nums + k*N)[i*nThreads + j]);
}
printf("\n");
}
printf("\n");
}
/* free the memory we allocated for the states and numbers */
hipFree(states);
hipFree(seeds);
hipFree(sequences);
hipFree(gpu_nums);
free(cpu_nums);
free(h_states);
free(h_seeds);
free(h_sequences);
return 0;
}
| 14b53efe50e13b88c253cdd8ca121b819eed60aa.cu | /* random3.cu */
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
#define nBlocks 2
#define nThreads 3
#define N (nBlocks*nThreads)
#define MAX 100
/* this GPU kernel function is used to initialize the random states */
__global__ void init(unsigned int seed, unsigned long long *seeds,
unsigned long long *sequences, curandState_t* states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
seeds[idx] = seed;
sequences[idx] = blockIdx.x;
/* we have to initialize the state */
curand_init(seeds[idx], /* the seed can be the same for each core; here a fixed seed is passed in from the CPU */
sequences[idx], /* the sequence number should differ between generators that must be independent;
here each block gets its own sequence, so all threads within a block share one stream */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[idx]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
__global__ void randoms(curandState_t* states, unsigned int* numbers) {
/* curand works like rand - except that it takes a state as a parameter */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
numbers[idx] = curand(&(states[idx])) % MAX;
}
int main(int argc, char* argv[]) {
int nRandomNumbers;
if (argc > 1) {
nRandomNumbers = atoi(argv[1]);
} else {
nRandomNumbers = 20;
}
/* CUDA's random number library uses curandState_t to keep track of the seed value
we will store a random state for every thread */
curandState_t* states;
unsigned long long *seeds;
unsigned long long *sequences;
curandState_t* h_states;
unsigned long long *h_seeds;
unsigned long long *h_sequences;
/* allocate space on the GPU for the random states */
cudaMalloc((void**) &seeds, N * sizeof(unsigned long long));
cudaMalloc((void**) &sequences, N * sizeof(unsigned long long));
cudaMalloc((void**) &states, N * sizeof(curandState_t));
/* invoke the GPU to initialize all of the random states */
init<<<nBlocks, nThreads>>>(0, seeds, sequences, states);
h_seeds = (unsigned long long *)malloc(N * sizeof(unsigned long long));
h_sequences = (unsigned long long *)malloc(N * sizeof(unsigned long long));
h_states = (curandState_t *)malloc(N * sizeof(curandState_t));
cudaMemcpy(h_seeds, seeds, N * sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaMemcpy(h_sequences, sequences, N * sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaMemcpy(h_states, states, N * sizeof(curandState_t), cudaMemcpyDeviceToHost);
printf("seed, sequence, curandState_t\n");
for (int i = 0; i < nBlocks; i++) {
printf("Block [%d]\n", i);
for (int j = 0; j < nThreads; j++) {
int idx = i * nThreads + j;
printf("\tThread [%u] (%llu, %llu), %u - ", j, h_seeds[idx], h_sequences[idx], h_states[idx].d);
for (int k = 0; k < 5; k++) {
printf("%d, ", h_states[idx].v[k]);
}
printf("\n");
}
printf("\n");
}
/* allocate an array of unsigned ints on the CPU and GPU */
unsigned int* cpu_nums;
unsigned int* gpu_nums;
cpu_nums = (unsigned int*)malloc(nRandomNumbers * N * sizeof(unsigned int));
cudaMalloc((void**) &gpu_nums, nRandomNumbers * N * sizeof(unsigned int));
cudaDeviceSynchronize();
/* invoke the kernel to get some random numbers */
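/* each launch writes one draw per thread into the next N-element slice of gpu_nums */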
for (int i = 0; i < nRandomNumbers; i++)
randoms<<<nBlocks, nThreads>>>(states, gpu_nums + N*i);
cudaError_t err = cudaGetLastError();
if (err) {
printf("ERROR... %s\n", cudaGetErrorString(err));
}
/* copy the random numbers back */
cudaMemcpy(cpu_nums, gpu_nums, nRandomNumbers * N * sizeof(unsigned int), cudaMemcpyDeviceToHost);
/* print them out */
for (int i = 0; i < nBlocks; i++) {
printf("Block [%d]\n", i);
for (int j = 0; j < nThreads; j++) {
printf("\tThread [%d]: ", j);
for (int k = 0; k < nRandomNumbers; k++) {
printf("%u ", (cpu_nums + k*N)[i*nThreads + j]);
}
printf("\n");
}
printf("\n");
}
/* free the memory we allocated for the states and numbers */
cudaFree(states);
cudaFree(seeds);
cudaFree(sequences);
cudaFree(gpu_nums);
free(cpu_nums);
free(h_states);
free(h_seeds);
free(h_sequences);
return 0;
}
|
2d49418658dea669ef472038ce4b458d6614afe9.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "rocblas.h"
#include <iostream>
using namespace std;
/* GPU HADAMARD PRODUCT FUNCTION
* Following are the gpu kernel and the host-side wrapper function for two matrices
* of same dimensions.
*/
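// All matrices are stored in column-major order: element (r, c) of an m-row matrix lives at index c*m + r.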
template <typename T1, typename T2, typename T3>
__global__ void hadamard_d(T1* A, T2* B, T3* C, int m, int n)
{
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
if (r < m && c < n){
C[c*m + r] = (T3)A[c*m + r] * B[c*m + r];
}
}
/*
Call the following function like
// Hadamard<float, float, float>(Y.d_elems, B.d_elems, C.d_elems, M, N);
*/
template <typename T1, typename T2, typename T3>
void Hadamard(T1* A, T2* B, T3* C, int m, int n)
{
const int L = max(m, n) > 16?16:max(m, n);
dim3 threadsPerBlock(L, L);
dim3 numBlocks(ceil((double)n / L), ceil((double)m / L)); // grid.x spans columns (n), grid.y spans rows (m)
hipLaunchKernelGGL(( hadamard_d<T1, T2, T3>) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, A, B, C, m, n);
}
/* GPU ADD FUNCTION
* Following are the gpu kernel and the host side wrapper function for matrix
* addition operation.
*/
template <typename T1, typename T2, typename T3>
__global__ void add_d(T1* A, T2* B, T3* C, int m, int n)
{
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
if (r < m && c < n){
C[c*m + r] = A[c*m + r] + B[c*m + r];
}
}
/*
Call the following function like
// Add<float, float, float>(Y.d_elems, B.d_elems, C.d_elems, M, N);
*/
template <typename T1, typename T2, typename T3>
void Add(T1* A, T2* B, T3* C, int m, int n)
{
const int L = max(m, n) > 16 ? 16 : max(m, n);
dim3 threadsPerBlock(L, L);
dim3 numBlocks(ceil((double)n / L), ceil((double)m / L));
cout << threadsPerBlock.y <<" " << threadsPerBlock.x << endl;
cout << numBlocks.y << " " << numBlocks.x << endl;
hipLaunchKernelGGL(( add_d<T1, T2, T3>) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, A, B, C, m, n);
}
/* GPU MATRIX MULTIPLY FUNCTION
* Following are the gpu kernel and the host side wrapper function for matrix
* multiplication of two matrices A and B where A is m x k matrix and B is a
* k x n matrix.
*/
template <typename T1, typename T2, typename T3>
__global__ void matmul_d(T1* A, T2* B, T3* C, int m, int n, int k)
{
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
T3 cvalue = 0;
if (r < m && c < n){
for (int i = 0; i < k; i++)
{
cvalue += (T3)(A[i*m + r] * B[c*k + i]);
}
C[c*m + r] = cvalue;
}
}
/*
Call the following function like
// MatMul<float, float, float>(Y.d_elems, B.d_elems, C.d_elems, M, N, K);
*/
template <typename T1, typename T2, typename T3>
void MatMul(T1* A, T2* B, T3* C, int m, int n, int k)
{
const int L = min(max(m, n), 32);
dim3 threadsPerBlock(L, L);
dim3 numBlocks(ceil((double)n / L), ceil((double)m / L));
cout << threadsPerBlock.y << " " << threadsPerBlock.x << endl;
cout << numBlocks.y << " " << numBlocks.x << endl;
hipLaunchKernelGGL(( matmul_d<T1, T2, T3>), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, A, B, C, m, n, k);
} | 2d49418658dea669ef472038ce4b458d6614afe9.cu | #pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cublas_v2.h"
#include <iostream>
using namespace std;
/* GPU HADAMARD PRODUCT FUNCTION
* Following are the gpu kernel and the host-side wrapper function for two matrices
* of same dimensions.
*/
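// All matrices are stored in column-major order: element (r, c) of an m-row matrix lives at index c*m + r.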
template <typename T1, typename T2, typename T3>
__global__ void hadamard_d(T1* A, T2* B, T3* C, int m, int n)
{
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
if (r < m && c < n){
C[c*m + r] = (T3)A[c*m + r] * B[c*m + r];
}
}
/*
Call the following function like
// Hadamard<float, float, float>(Y.d_elems, B.d_elems, C.d_elems, M, N);
*/
template <typename T1, typename T2, typename T3>
void Hadamard(T1* A, T2* B, T3* C, int m, int n)
{
const int L = max(m, n) > 16?16:max(m, n);
dim3 threadsPerBlock(L, L);
dim3 numBlocks(ceil((double)n / L), ceil((double)m / L)); // grid.x spans columns (n), grid.y spans rows (m)
hadamard_d<T1, T2, T3> <<<numBlocks, threadsPerBlock >>>(A, B, C, m, n);
}
/* GPU ADD FUNCTION
* Following are the gpu kernel and the host side wrapper function for matrix
* addition operation.
*/
template <typename T1, typename T2, typename T3>
__global__ void add_d(T1* A, T2* B, T3* C, int m, int n)
{
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
if (r < m && c < n){
C[c*m + r] = A[c*m + r] + B[c*m + r];
}
}
/*
Call the following function like
// Add<float, float, float>(Y.d_elems, B.d_elems, C.d_elems, M, N);
*/
template <typename T1, typename T2, typename T3>
void Add(T1* A, T2* B, T3* C, int m, int n)
{
const int L = max(m, n) > 16 ? 16 : max(m, n);
dim3 threadsPerBlock(L, L);
dim3 numBlocks(ceil((double)n / L), ceil((double)m / L));
cout << threadsPerBlock.y <<" " << threadsPerBlock.x << endl;
cout << numBlocks.y << " " << numBlocks.x << endl;
add_d<T1, T2, T3> <<<numBlocks, threadsPerBlock >>>(A, B, C, m, n);
}
/* GPU MATRIX MULTIPLY FUNCTION
* Following are the gpu kernel and the host side wrapper function for matrix
* multiplication of two matrices A and B where A is m x k matrix and B is a
* k x n matrix.
*/
template <typename T1, typename T2, typename T3>
__global__ void matmul_d(T1* A, T2* B, T3* C, int m, int n, int k)
{
int r = blockIdx.y*blockDim.y + threadIdx.y;
int c = blockIdx.x*blockDim.x + threadIdx.x;
T3 cvalue = 0;
if (r < m && c < n){
for (int i = 0; i < k; i++)
{
cvalue += (T3)(A[i*m + r] * B[c*k + i]);
}
C[c*m + r] = cvalue;
}
}
/*
Call the following function like
// MatMul<float, float, float>(Y.d_elems, B.d_elems, C.d_elems, M, N, K);
*/
template <typename T1, typename T2, typename T3>
void MatMul(T1* A, T2* B, T3* C, int m, int n, int k)
{
const int L = min(max(m, n), 32);
dim3 threadsPerBlock(L, L);
dim3 numBlocks(ceil((double)n / L), ceil((double)m / L));
cout << threadsPerBlock.y << " " << threadsPerBlock.x << endl;
cout << numBlocks.y << " " << numBlocks.x << endl;
matmul_d<T1, T2, T3><<<numBlocks, threadsPerBlock >>>(A, B, C, m, n, k);
} |
47a6b94c62738e9209af6dde3bef353575351a56.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
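// NOTE: REPLACE_ITERATIONS is a textual placeholder; it must be substituted with an integer before this file compiles.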
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive multiply access: only lanes 0-7 of each warp run the loop, and the results are never checked
if(((i%32)<=7) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL((PowerKernal2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
| 47a6b94c62738e9209af6dde3bef353575351a56.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
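// NOTE: REPLACE_ITERATIONS is a textual placeholder; it must be substituted with an integer before this file compiles.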
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive multiply access: only lanes 0-7 of each warp run the loop, and the results are never checked
if(((i%32)<=7) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
6227541baefa8a18eef88f2a977dd5c697aa9036.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "conv2DDevice.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(int));
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
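// round the launch domain up to the next multiple of the block dimensions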
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((conv2DDevice), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((conv2DDevice), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((conv2DDevice), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6227541baefa8a18eef88f2a977dd5c697aa9036.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "conv2DDevice.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(int));
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
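// round the launch domain up to the next multiple of the block dimensions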
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
conv2DDevice<<<gridBlock,threadBlock>>>(in,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
conv2DDevice<<<gridBlock,threadBlock>>>(in,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
conv2DDevice<<<gridBlock,threadBlock>>>(in,out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a83d98eb8d54a9a305c09fdcbde1c6ea2b3ac38a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yololayer.h"
#include "cuda_utils.h"
#include <cassert>
#include <vector>
#include <iostream>
namespace Tn {
template<typename T>
void write(char*& buffer, const T& val) {
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template<typename T>
void read(const char*& buffer, T& val) {
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
}
namespace nvinfer1 {
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, bool is_segmentation, const std::vector<YoloKernel>& vYoloKernel) {
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
is_segmentation_ = is_segmentation;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* kNumAnchor * 2;
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin() {
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(hipFree(mAnchor[ii]));
}
CUDA_CHECK(hipHostFree(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) {
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
read(d, is_segmentation_);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* kNumAnchor * 2;
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
write(d, is_segmentation_);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t s = sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount);
s += sizeof(YoloKernel) * mYoloKernel.size();
s += sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight);
s += sizeof(mMaxOutObject) + sizeof(is_segmentation_);
return s;
}
int YoloLayerPlugin::initialize() TRT_NOEXCEPT {
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT {
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT {
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT {
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT {
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT {
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT {}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {}
const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT {
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
void YoloLayerPlugin::destroy() TRT_NOEXCEPT {
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT {
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, is_segmentation_, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth,
int yoloHeight, const float anchors[kNumAnchor * 2], int classes, int outputElem, bool is_segmentation) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
if (is_segmentation) info_len_i += 32;
const float* curInput = input + bnIdx * (info_len_i * total_grid * kNumAnchor);
for (int k = 0; k < kNumAnchor; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < kIgnoreThresh) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < 5 + classes; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
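// per-image output layout: [ detection count (1 float) | up to maxoutobject Detection structs ]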
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection);
Detection *det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
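// YOLOv5 box decode: xy = (2*sigmoid(t) - 0.5 + cell index) * stride, wh = (2*sigmoid(t))^2 * anchor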
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
for (int i = 0; is_segmentation && i < 32; i++) {
det->mask[i] = curInput[idx + k * info_len_i * total_grid + (i + 5 + classes) * total_grid];
}
}
}
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, hipStream_t stream, int batchSize) {
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i) {
const auto& yolo = mYoloKernel[i];
numElem = yolo.width * yolo.height * batchSize;
if (numElem < mThreadCount) mThreadCount = numElem;
CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem, is_segmentation_);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT {
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator() {
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT {
assert(fc->nbFields == 2);
assert(strcmp(fc->fields[0].name, "netinfo") == 0);
assert(strcmp(fc->fields[1].name, "kernels") == 0);
int *p_netinfo = (int*)(fc->fields[0].data);
int class_count = p_netinfo[0];
int input_w = p_netinfo[1];
int input_h = p_netinfo[2];
int max_output_object_count = p_netinfo[3];
bool is_segmentation = (bool)p_netinfo[4];
std::vector<YoloKernel> kernels(fc->fields[1].length);
memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(YoloKernel));
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, is_segmentation, kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT {
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| a83d98eb8d54a9a305c09fdcbde1c6ea2b3ac38a.cu | #include "yololayer.h"
#include "cuda_utils.h"
#include <cassert>
#include <vector>
#include <iostream>
namespace Tn {
template<typename T>
void write(char*& buffer, const T& val) {
*reinterpret_cast<T*>(buffer) = val;
buffer += sizeof(T);
}
template<typename T>
void read(const char*& buffer, T& val) {
val = *reinterpret_cast<const T*>(buffer);
buffer += sizeof(T);
}
}
namespace nvinfer1 {
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, bool is_segmentation, const std::vector<YoloKernel>& vYoloKernel) {
mClassCount = classCount;
mYoloV5NetWidth = netWidth;
mYoloV5NetHeight = netHeight;
mMaxOutObject = maxOut;
is_segmentation_ = is_segmentation;
mYoloKernel = vYoloKernel;
mKernelCount = vYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* kNumAnchor * 2;
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin() {
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(cudaFree(mAnchor[ii]));
}
CUDA_CHECK(cudaFreeHost(mAnchor));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) {
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
read(d, mYoloV5NetWidth);
read(d, mYoloV5NetHeight);
read(d, mMaxOutObject);
read(d, is_segmentation_);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(mYoloKernel.data(), d, kernelSize);
d += kernelSize;
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* kNumAnchor * 2;
for (int ii = 0; ii < mKernelCount; ii++) {
CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
write(d, mYoloV5NetWidth);
write(d, mYoloV5NetHeight);
write(d, mMaxOutObject);
write(d, is_segmentation_);
auto kernelSize = mKernelCount * sizeof(YoloKernel);
memcpy(d, mYoloKernel.data(), kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT {
size_t s = sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount);
s += sizeof(YoloKernel) * mYoloKernel.size();
s += sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight);
s += sizeof(mMaxOutObject) + sizeof(is_segmentation_);
return s;
}
int YoloLayerPlugin::initialize() TRT_NOEXCEPT {
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT {
//output the result to channel
int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT {
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT {
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT {
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT {
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT {
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT {}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {}
const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT {
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
void YoloLayerPlugin::destroy() TRT_NOEXCEPT {
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT {
YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, is_segmentation_, mYoloKernel);
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
__global__ void CalDetection(const float *input, float *output, int noElements,
const int netwidth, const int netheight, int maxoutobject, int yoloWidth,
int yoloHeight, const float anchors[kNumAnchor * 2], int classes, int outputElem, bool is_segmentation) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid * bnIdx;
int info_len_i = 5 + classes;
if (is_segmentation) info_len_i += 32;
const float* curInput = input + bnIdx * (info_len_i * total_grid * kNumAnchor);
for (int k = 0; k < kNumAnchor; ++k) {
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (box_prob < kIgnoreThresh) continue;
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < 5 + classes; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
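// per-image output layout: [ detection count (1 float) | up to maxoutobject Detection structs ]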
float *res_count = output + bnIdx * outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= maxoutobject) return;
char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection);
Detection *det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
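// YOLOv5 box decode: xy = (2*sigmoid(t) - 0.5 + cell index) * stride, wh = (2*sigmoid(t))^2 * anchor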
det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;
det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
det->conf = box_prob * max_cls_prob;
det->class_id = class_id;
for (int i = 0; is_segmentation && i < 32; i++) {
det->mask[i] = curInput[idx + k * info_len_i * total_grid + (i + 5 + classes) * total_grid];
}
}
}
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) {
int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
}
int numElem = 0;
for (unsigned int i = 0; i < mYoloKernel.size(); ++i) {
const auto& yolo = mYoloKernel[i];
numElem = yolo.width * yolo.height * batchSize;
if (numElem < mThreadCount) mThreadCount = numElem;
CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> >
(inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem, is_segmentation_);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT {
forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator() {
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT {
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT {
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT {
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT {
assert(fc->nbFields == 2);
assert(strcmp(fc->fields[0].name, "netinfo") == 0);
assert(strcmp(fc->fields[1].name, "kernels") == 0);
int *p_netinfo = (int*)(fc->fields[0].data);
int class_count = p_netinfo[0];
int input_w = p_netinfo[1];
int input_h = p_netinfo[2];
int max_output_object_count = p_netinfo[3];
bool is_segmentation = (bool)p_netinfo[4];
std::vector<YoloKernel> kernels(fc->fields[1].length);
memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(YoloKernel));
YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, is_segmentation, kernels);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT {
// This object will be deleted when the network is destroyed, which will
// call YoloLayerPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
a7b99bf91c3abbad8d3838116bf9f7af3b14b880.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "iostream"
__global__ void feelScreenGPU(int* screen, int ScreenWidth, int ScreenHeight, double leftB, double downB, double pWidth, double pHeight, int iterations) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
//if (threadNum < ScreenWidth * ScreenHeight) {
int count = 0;
double r1 = 0;
double r2 = leftB + pWidth * (threadNum % ScreenWidth);
double c1 = 0;
double c2 = downB + pHeight * (threadNum / ScreenWidth);
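// escape-time iteration: z <- z*z + c until |z|^2 > 4 or the iteration budget is exhausted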
while (count < iterations)
{
double r1Temp = r1;
r1 = r1 * r1 - c1 * c1 + r2;
c1 = 2 * r1Temp * c1 + c2;
if ((r1 * r1 + c1 * c1) > 4) {
break;
}
count++;
}
screen[threadNum] = count;
//}
}
void CalculateScreen(int* screen, int ScreenWidth, int ScreenHeight, double leftB, double downB, double pWidth, double pHeight, int iterations, int Blocks, int Threads) {
hipLaunchKernelGGL(( feelScreenGPU) , dim3(Blocks), dim3(Threads), 0, 0, screen, ScreenWidth, ScreenHeight, leftB, downB, pWidth, pHeight, iterations);
hipDeviceSynchronize();
}
void FreeMem(int* screen) {
hipFree(screen);
}
int* AllocateMem(int* screen, int memSize) {
hipMallocManaged(&screen, memSize * sizeof(int));
return screen;
} | a7b99bf91c3abbad8d3838116bf9f7af3b14b880.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "iostream"
__global__ void feelScreenGPU(int* screen, int ScreenWidth, int ScreenHeight, double leftB, double downB, double pWidth, double pHeight, int iterations) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
//if (threadNum < ScreenWidth * ScreenHeight) {
int count = 0;
double r1 = 0;
double r2 = leftB + pWidth * (threadNum % ScreenWidth);
double c1 = 0;
double c2 = downB + pHeight * (threadNum / ScreenWidth);
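// escape-time iteration: z <- z*z + c until |z|^2 > 4 or the iteration budget is exhausted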
while (count < iterations)
{
double r1Temp = r1;
r1 = r1 * r1 - c1 * c1 + r2;
c1 = 2 * r1Temp * c1 + c2;
if ((r1 * r1 + c1 * c1) > 4) {
break;
}
count++;
}
screen[threadNum] = count;
//}
}
void CalculateScreen(int* screen, int ScreenWidth, int ScreenHeight, double leftB, double downB, double pWidth, double pHeight, int iterations, int Blocks, int Threads) {
feelScreenGPU <<<Blocks, Threads>>> (screen, ScreenWidth, ScreenHeight, leftB, downB, pWidth, pHeight, iterations);
cudaDeviceSynchronize();
}
void FreeMem(int* screen) {
cudaFree(screen);
}
int* AllocateMem(int* screen, int memSize) {
cudaMallocManaged(&screen, memSize * sizeof(int));
return screen;
} |
61b2f301e056a658f5ee8b0d1ac3aeaff7060347.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha
// =============================================================================
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChFsiForceIISPH.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#define RESOLUTION_LENGTH_MULT_IISPH 2.0
//==========================================================================================================================================
namespace chrono {
namespace fsi {
// double precision atomic add function
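// Implemented with the usual compare-and-swap loop on the 64-bit integer view of the value,
// the standard fallback when a native double-precision atomicAdd is not available.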
__device__ inline double datomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
ChFsiForceIISPH::ChFsiForceIISPH(std::shared_ptr<ChBce> otherBceWorker,
std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
bool verb)
: ChFsiForce(otherBceWorker,
otherSortedSphMarkersD,
otherMarkersProximityD,
otherFsiGeneralData,
otherParamsH,
otherNumObjects,
verb) {}
ChFsiForceIISPH::~ChFsiForceIISPH() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceIISPH::Initialize() {
ChFsiForce::Initialize();
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
hipDeviceSynchronize();
}
//--------------------------------------------------------------------------------------------------------------------------------
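// IISPH step 1: for every marker, accumulate the d_ii coefficient
// (sum_j m_j * (-dt^2 / rho_i^2) * grad W_ij) and advance the velocity by the non-pressure
// forces (viscosity, the kappa cohesion term, gravity/body force) to obtain the
// intermediate velocity V_i_np used for the density prediction.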
__global__ void V_i_np__AND__d_ii_kernel(Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* d_ii,
Real3* V_i_np,
Real* sumWij_inv,
Real* G_tensor,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) {
return;
}
// sortedRhoPreMu[i_idx].x = sortedRhoPreMu[i_idx].x / sumWij_inv[i_idx];
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real mu_0 = paramsD.mu0;
Real epsilon = paramsD.epsMinMarkersDis;
Real dT = delta_t;
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
Real RHO_0 = paramsD.rho0;
if (sortedRhoPreMu[i_idx].x < EPSILON) {
printf("density is %f,ref density= %f\n", sortedRhoPreMu[i_idx].x, RHO_0);
}
Real3 posi = mR3(sortedPosRad[i_idx]);
Real3 Veli = sortedVelMas[i_idx];
Real Rhoi = sortedRhoPreMu[i_idx].x;
Real3 My_d_ii = mR3(0);
Real3 My_F_i_np = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posi);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posj = mR3(sortedPosRad[j]);
Real3 rij = Distance(posi, posj);
Real d = length(rij);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real3 eij = rij / d;
Real3 Velj = sortedVelMas[j];
Real Rhoj = sortedRhoPreMu[j].x;
Real h_j = sortedPosRad[j].w;
if (Rhoj == 0) {
printf("Bug F_i_np__AND__d_ii_kernel i=%d j=%d, hi=%f, hj=%f\n", i_idx, j, h_i, h_j);
}
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
My_d_ii += m_j * (-(dT * dT) / (Rhoi * Rhoi)) * grad_ij;
Real Rho_bar = (Rhoj + Rhoi) * 0.5;
Real3 V_ij = (Veli - Velj);
// Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar;
// Real3 muNumerator = nu * fmin(0.0, dot(rij, V_ij)) * grad_ij;
Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij;
Real muDenominator = (Rho_bar * Rho_bar) * (d * d + h_ij * h_ij * epsilon);
// if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0))
// if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 &&
// sortedRhoPreMu[j].w < 0))
My_F_i_np += m_j * muNumerator / muDenominator;
Real Wd = W3h(d, h_ij);
My_F_i_np -= paramsD.kappa / m_i * m_j * Wd * rij;
}
}
}
}
}
// if (!paramsD.Conservative_Form)
// My_F_i_np = mu_0 * LaplacainVi;
My_F_i_np *= m_i;
My_F_i_np += m_i * source_term;
d_ii[i_idx] = My_d_ii;
V_i_np[i_idx] = (My_F_i_np * dT + Veli); // This does not contain m_0?
}
//--------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------
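// IISPH step 2: predict the advected density rho_np from the divergence of the intermediate
// velocities, accumulate the diagonal coefficient a_ii of the pressure equation and the
// summed kernel gradients, and seed p_old with the current pressure.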
__global__ void Rho_np_AND_a_ii_AND_sum_m_GradW(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real* rho_np, // Write
Real* a_ii, // Write
Real* p_old, // Write
Real3* V_np, // Read
Real3* d_ii, // Read
Real3* sum_m_GradW,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 posi = mR3(sortedPosRad[i_idx]);
Real3 Veli_np = V_np[i_idx];
Real Rho_i = sortedRhoPreMu[i_idx].x;
Real3 my_d_ii = d_ii[i_idx];
Real rho_temp = 0;
Real my_a_ii = 0;
Real3 My_sum_m_gradW = mR3(0);
Real dT = delta_t;
// get address in gridj
int3 gridPos = calcGridPos(posi);
//
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posj = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posi, posj);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 Velj_np = V_np[j];
Real3 grad_i_wij = GradWh(dist3, h_ij);
rho_temp += m_j * dot((Veli_np - Velj_np), grad_i_wij);
Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij);
my_a_ii += m_j * dot((my_d_ii - d_ji), grad_i_wij);
My_sum_m_gradW += m_j * grad_i_wij;
}
}
}
}
}
rho_np[i_idx] = dT * rho_temp + sortedRhoPreMu[i_idx].x;
// Note: a_ii can become zero, which can cause divide-by-zero issues for free particles
a_ii[i_idx] = abs(my_a_ii) > EPSILON ? my_a_ii : 1.0;
sum_m_GradW[i_idx] = My_sum_m_gradW;
p_old[i_idx] = sortedRhoPreMu[i_idx].y; // = 1000; // Note that this is outside of the for loop
}
//--------------------------------------------------------------------------------------------------------------------------------
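// Per pressure iteration: accumulate dij_pj = sum_j d_ij * p_j (displacement due to the
// neighbors' current pressures) and the pressure force F_p, both used by the matrix-free
// update in Calc_Pressure.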
__global__ void Calc_dij_pj(Real3* dij_pj, // write
Real3* F_p, // Write
Real3* d_ii, // Read
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* p_old,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 my_F_p = mR3(0);
Real p_i_old = p_old[i_idx];
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real Rho_i = sortedRhoPreMu[i_idx].x;
if (sortedRhoPreMu[i_idx].x < EPSILON) {
printf("(Calc_dij_pj) My density is %f in Calc_dij_pj\n", sortedRhoPreMu[i_idx].x);
}
Real dT = delta_t;
Real3 My_dij_pj = mR3(0);
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
////CHECK THIS CONDITION!!!
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3, h_ij);
Real Rho_j = sortedRhoPreMu[j].x;
Real p_j_old = p_old[j];
My_dij_pj += m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij * p_j_old;
my_F_p += m_j * ((p_i_old / (Rho_i * Rho_i)) + (p_j_old / (Rho_j * Rho_j))) * grad_i_wij;
}
}
}
}
}
dij_pj[i_idx] = My_dij_pj;
F_p[i_idx] = -m_i * my_F_p;
}
//--------------------------------------------------------------------------------------------------------------------------------
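// Count, for each marker, how many entries its row of the pressure matrix will need:
// unique first-ring neighbors plus, for fluid markers, unique second-ring neighbors
// (the IISPH coefficients also couple neighbors of neighbors), with a padding of 10.
// BCE-BCE pairs are not counted when the ADAMI boundary treatment is active.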
__global__ void CalcNumber_Contacts(uint* numContacts,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
numContacts[i_idx] = 1;
return;
}
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
int myType = sortedRhoPreMu[i_idx].w;
Real3 pos_i = mR3(sortedPosRad[i_idx]);
uint numCol[400];
int counter = 1;
numCol[0] = i_idx; // The first one is always the idx of the marker itself
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
bool AlreadyHave = false;
for (uint findCol = 1; findCol <= counter; findCol++) {
if (numCol[findCol] == j) {
AlreadyHave = true;
continue;
}
}
// Room for improvement ...
if (!AlreadyHave) {
numCol[counter] = j;
counter++;
// Do not count BCE-BCE interactions...
if (myType >= 0 && sortedRhoPreMu[j].w >= 0 && paramsD.bceType == BceVersion::ADAMI)
counter--;
}
if (myType != -1) // For BCE no need to go deeper than this...
continue;
Real h_j = sortedPosRad[j].w;
int3 gridPosJ = calcGridPos(pos_j);
for (int zz = -1; zz <= 1; zz++) {
for (int yy = -1; yy <= 1; yy++) {
for (int xx = -1; xx <= 1; xx++) {
int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz);
uint gridHashJ = calcGridHash(neighbourPosJ);
uint startIndexJ = cellStart[gridHashJ];
if (startIndexJ != 0xffffffff) { // cell is not empty
uint endIndexJ = cellEnd[gridHashJ];
for (uint k = startIndexJ; k < endIndexJ; k++) {
Real3 pos_k = mR3(sortedPosRad[k]);
Real3 dist3jk = Distance(pos_j, pos_k);
Real djk = length(dist3jk);
if (djk > RESOLUTION_LENGTH_MULT * h_j || k == j || k == i_idx ||
sortedRhoPreMu[k].w <= -2)
continue;
bool AlreadyHave2 = false;
for (uint findCol = 1; findCol <= counter; findCol++) {
if (numCol[findCol] == k) {
AlreadyHave2 = true;
continue;
}
}
if (!AlreadyHave2) {
numCol[counter] = k;
counter++;
}
}
}
}
}
}
}
}
}
}
}
numContacts[i_idx] = counter + 10;
}
//--------------------------------------------------------------------------------------------------------------------------------
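// Accumulate sum_j m_j * grad W_ij for every marker; this term enters the off-diagonal
// coefficients assembled in Calc_fluid_aij_Bi.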
__global__ void Calc_summGradW(Real3* summGradW, // write
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real3 My_summgradW = mR3(0);
// Real dT = paramsD.dT;
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3, h_ij);
My_summgradW += m_j * grad_i_wij;
}
}
}
}
}
summGradW[i_idx] = My_summgradW;
}
//--------------------------------------------------------------------------------------------------------------------------------
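// Assemble the matrix row and right-hand side for a boundary (BCE) marker. With the ADAMI
// treatment the wall pressure is a kernel-weighted average of the neighboring fluid
// pressures plus an acceleration/hydrostatic correction, and V_new enforces the prescribed
// wall velocity; otherwise a wall-normal pressure-gradient condition is written into the row.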
__device__ void Calc_BC_aij_Bi(const uint i_idx,
Real* csrValA,
uint* csrColIndA,
unsigned long int* GlobalcsrColIndA,
uint* numContacts,
// The above 4 vectors are used for CSR form.
Real* a_ii, // write
Real* B_i,
Real4* sortedPosRad,
Real3* sortedVelMas,
const Real4* sortedRhoPreMu,
Real3* V_new,
Real* p_old,
Real3* Normals,
Real* G_i,
Real* sumWij_inv,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes,
int4 updatePortion,
uint* gridMarkerIndexD,
const uint* cellStart,
const uint* cellEnd,
const size_t numAllMarkers,
bool IsSPARSE) {
uint csrStartIdx = numContacts[i_idx] + 1;
uint csrEndIdx = numContacts[i_idx + 1];
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 my_normal = Normals[i_idx];
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
// if (bceIndex >= numObjectsD.numRigidMarkers) {
// return;
// }
// int Original_idx = gridMarkerIndexD[i_idx];
Real3 myAcc = mR3(0.0);
Real3 V_prescribed = mR3(0.0);
// if (!(sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[i_idx].w <= 3))
// printf("type of marker is %f\n", sortedRhoPreMu[i_idx].w);
BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD, rigidSPH_MeshPos_LRF_D,
posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D,
omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD,
numFlex1D, CableElementsNodes, ShellelementsNodes);
for (int c = csrStartIdx; c < csrEndIdx; c++) {
csrValA[c] = 0;
csrColIndA[c] = i_idx;
GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx;
}
// if ((csrEndIdx - csrStartIdx) != uint(0)) {
Real3 numeratorv = mR3(0);
Real denumenator = 0;
Real pRHS = 0;
// Real Rho_i = sortedRhoPreMu[i_idx].x;
Real3 pos_i = mR3(sortedPosRad[i_idx]);
// get address in grid
int3 gridPos = calcGridPos(pos_i);
uint counter = 0;
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx)
continue;
Real h_j = sortedPosRad[j].w;
// Real m_j = h_j * h_j * h_j * paramsD.rho0;
// Real rhoj = sortedRhoPreMu[j].x;
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = W3h(d, h_ij);
Real3 Vel_j = sortedVelMas[j];
if (paramsD.bceType != BceVersion::ADAMI) {
if (sortedRhoPreMu[j].w == -1.0 || dot(my_normal, mR3(pos_i - pos_j)) > 0) {
Real3 grad_i_wij = GradWh(dist3, h_ij);
csrValA[csrStartIdx - 1] += dot(grad_i_wij, my_normal);
csrValA[counter + csrStartIdx] = -dot(grad_i_wij, my_normal);
csrColIndA[counter + csrStartIdx] = j;
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx;
counter++;
if (sortedRhoPreMu[j].w != -1)
continue;
numeratorv += Vel_j * Wd;
denumenator += Wd;
}
} else {
if (sortedRhoPreMu[j].w != -1 || sortedRhoPreMu[j].w <= -2)
continue;
numeratorv += Vel_j * Wd;
denumenator += Wd;
pRHS += dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd;
csrValA[counter + csrStartIdx] = -Wd;
csrColIndA[counter + csrStartIdx] = j;
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx;
counter++;
}
}
}
}
}
}
if (abs(denumenator) < EPSILON) {
V_new[i_idx] = 2 * V_prescribed;
B_i[i_idx] = 0;
if (paramsD.bceType == BceVersion::ADAMI) {
csrValA[csrStartIdx - 1] = a_ii[i_idx];
csrColIndA[csrStartIdx - 1] = i_idx;
GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx;
}
} else {
Real Scaling = a_ii[i_idx] / denumenator;
V_new[i_idx] = 2 * V_prescribed - numeratorv / denumenator;
if (paramsD.bceType == BceVersion::ADAMI) {
B_i[i_idx] = pRHS;
csrValA[csrStartIdx - 1] = denumenator;
csrColIndA[csrStartIdx - 1] = i_idx;
GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx;
for (int i = csrStartIdx - 1; i < csrEndIdx; i++)
csrValA[i] *= Scaling;
B_i[i_idx] *= Scaling;
}
}
if (paramsD.bceType != BceVersion::ADAMI) {
Real Scaling = a_ii[i_idx];
if (abs(csrValA[csrStartIdx - 1]) > EPSILON) {
Scaling = a_ii[i_idx]; // csrValA[csrStartIdx - 1];
for (int count = csrStartIdx - 1; count < csrEndIdx; count++)
csrValA[count] *= Scaling;
} else {
clearRow(i_idx, csrStartIdx - 1, csrEndIdx, csrValA, B_i);
for (int count = csrStartIdx - 1; count < csrEndIdx; count++) {
int j = csrColIndA[count];
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx) {
csrValA[count] = 0.0;
continue;
}
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = W3h(d, h_ij);
csrValA[count] = sumWij_inv[j] * Wd * Scaling;
}
csrValA[csrStartIdx - 1] -= 1.0 * Scaling;
}
B_i[i_idx] = 0.0 * Scaling;
}
sortedVelMas[i_idx] = V_new[i_idx];
} // Calc_BC_aij_Bi
//--------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------
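// Assemble the matrix row and right-hand side (rho0 - rho_np) for a fluid marker. The
// off-diagonal coefficients collect contributions from both direct neighbors and
// neighbors-of-neighbors; the diagonal is a_ii. Rows of markers with density below
// 0.999*rho0 are reduced to the diagonal with zero RHS, driving their pressure to zero.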
__device__ void Calc_fluid_aij_Bi(const uint i_idx,
Real* csrValA,
uint* csrColIndA,
unsigned long int* GlobalcsrColIndA,
uint* numContacts,
// The above 4 vectors are used for CSR form.
Real* B_i,
Real3* d_ii, // Read
Real* a_ii, // Read
Real* rho_np, // Read
Real3* summGradW,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const int numAllMarkers,
bool IsSPARSE) {
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real dT = delta_t;
int counter = 0; // There is always one non-zero at each row- The marker itself
B_i[i_idx] = paramsD.rho0 - rho_np[i_idx];
uint csrStartIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii
uint csrEndIdx = numContacts[i_idx + 1];
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
// for (int c = csrStartIdx; c < csrEndIdx; c++) {
// csrValA[c] = a_ii[i_idx];
// csrColIndA[c] = i_idx;
// GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx;
// }
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
// Real Rho_i = sortedRhoPreMu[i_idx].x;
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3, h_ij);
Real Rho_j = sortedRhoPreMu[j].x;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real3 d_it = m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij;
Real My_a_ij_1 = m_j * dot(d_it, summGradW[i_idx]);
Real My_a_ij_2 = m_j * dot(d_ii[j], grad_i_wij);
Real My_a_ij_12 = My_a_ij_1 - My_a_ij_2;
bool DONE1 = false;
for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) {
if (csrColIndA[findCol] == j) {
csrValA[findCol] += My_a_ij_12;
csrColIndA[findCol] = j;
GlobalcsrColIndA[findCol] = j + numAllMarkers * i_idx;
DONE1 = true;
continue;
}
}
if (!DONE1) {
csrValA[counter + csrStartIdx] += My_a_ij_12;
csrColIndA[counter + csrStartIdx] = j;
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx;
counter++;
}
int3 gridPosJ = calcGridPos(pos_j);
for (int zz = -1; zz <= 1; zz++) {
for (int yy = -1; yy <= 1; yy++) {
for (int xx = -1; xx <= 1; xx++) {
int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz);
uint gridHashJ = calcGridHash(neighbourPosJ);
uint startIndexJ = cellStart[gridHashJ];
if (startIndexJ != 0xffffffff) { // cell is not empty
uint endIndexJ = cellEnd[gridHashJ];
for (uint k = startIndexJ; k < endIndexJ; k++) {
Real3 pos_k = mR3(sortedPosRad[k]);
Real3 dist3jk = Distance(pos_j, pos_k);
Real djk = length(dist3jk);
if (djk > RESOLUTION_LENGTH_MULT_IISPH * h_j || k == j || k == i_idx ||
sortedRhoPreMu[k].w <= -2)
continue;
Real h_k = sortedPosRad[k].w;
Real h_jk = 0.5 * (h_j + h_k);
Real3 grad_j_wjk = GradWh(dist3jk, h_jk);
Real m_k = cube(sortedPosRad[k].w) * paramsD.rho0;
Real Rho_k = sortedRhoPreMu[k].x;
Real3 d_jk = m_k * (-(dT * dT) / (Rho_k * Rho_k)) * grad_j_wjk;
Real My_a_ij_3 = m_j * dot(d_jk, grad_i_wij);
bool DONE2 = false;
for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) {
if (csrColIndA[findCol] == k) {
csrValA[findCol] -= My_a_ij_3;
csrColIndA[findCol] = k;
GlobalcsrColIndA[findCol] = k + numAllMarkers * i_idx;
DONE2 = true;
continue;
}
}
if (!DONE2) {
csrValA[counter + csrStartIdx] -= My_a_ij_3;
csrColIndA[counter + csrStartIdx] = k;
GlobalcsrColIndA[counter + csrStartIdx] = k + numAllMarkers * i_idx;
counter++;
}
}
}
}
}
}
}
}
}
}
}
for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) {
if (csrColIndA[myIdx] == i_idx)
csrValA[myIdx] = a_ii[i_idx];
}
csrValA[csrStartIdx - 1] = a_ii[i_idx];
csrColIndA[csrStartIdx - 1] = i_idx;
GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx;
if (sortedRhoPreMu[i_idx].x < 0.999 * paramsD.rho0) {
csrValA[csrStartIdx - 1] = a_ii[i_idx];
for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) {
csrValA[myIdx] = 0.0;
B_i[i_idx] = 0.0;
}
}
Real RHS = B_i[i_idx];
B_i[i_idx] = RHS; // fminf(0.0, RHS);
}
//--------------------------------------------------------------------------------------------------------------------------------
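// One thread per marker: route the row assembly by marker type. Inactive markers
// (type <= -2) get an identity row with zero RHS, fluid markers (type == -1) use
// Calc_fluid_aij_Bi, and boundary/solid markers use Calc_BC_aij_Bi.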
__global__ void FormAXB(Real* csrValA,
uint* csrColIndA,
unsigned long int* GlobalcsrColIndA,
uint* numContacts,
// The above 4 vectors are used for CSR form.
Real* a_ij, // write
Real* B_i, // write
Real3* d_ii, // Read
Real* a_ii, // Read
Real3* summGradW,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* V_new,
Real* p_old,
Real3* Normals,
Real* G_i,
Real* sumWij_inv,
Real* rho_np,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes,
int4 updatePortion,
uint* gridMarkerIndexD,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
bool IsSPARSE,
volatile bool* isError) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
// Real m_0 = paramsD.markerMass;
// Real RHO_0 = paramsD.rho0;
// Real dT = paramsD.dT;
// Real3 gravity = paramsD.gravity;
int TYPE_OF_MARKER = sortedRhoPreMu[i_idx].w;
if (TYPE_OF_MARKER <= -2) {
B_i[i_idx] = 0;
uint csrStartIdx = numContacts[i_idx];
// This needs to be check to see if it messes up the condition number of the matrix
csrValA[csrStartIdx] = 1.0;
csrColIndA[csrStartIdx] = i_idx;
GlobalcsrColIndA[csrStartIdx] = i_idx + numAllMarkers * i_idx;
} else if (TYPE_OF_MARKER == -1) {
Calc_fluid_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, B_i, d_ii, a_ii, rho_np, summGradW,
sortedPosRad, sortedRhoPreMu, cellStart, cellEnd, delta_t, numAllMarkers, true);
} else if (TYPE_OF_MARKER > -1)
Calc_BC_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, a_ii, B_i, sortedPosRad, sortedVelMas,
sortedRhoPreMu, V_new, p_old, Normals, G_i, sumWij_inv,
qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D,
omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD,
pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodes,
ShellelementsNodes,
updatePortion, gridMarkerIndexD, cellStart, cellEnd, numAllMarkers, true);
}
//--------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------
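// One Jacobi sweep on the assembled CSR system:
// p_i = (b_i - sum_{j != i} a_ij * p_j_old) / a_ii, with the absolute row residual
// stored in Residuals.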
__global__ void Calc_Pressure_AXB_USING_CSR(Real* csrValA,
Real* a_ii,
uint* csrColIndA,
uint* numContacts,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real3* sortedVelMas,
Real3* V_new,
Real* p_old,
Real* B_i, // Read
Real* Residuals,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
// Real RHO_0 = paramsD.rho0;
// bool ClampPressure = paramsD.ClampPressure;
// Real Max_Pressure = paramsD.Max_Pressure;
uint startIdx = numContacts[i_idx] + 1; // numContacts[i_idx] is the diagonal itself
uint endIdx = numContacts[i_idx + 1];
Real aij_pj = 0;
// Real error = aij_pj + sortedRhoPreMu[i_idx].y * csrValA[startIdx - 1] - B_i[i_idx];
for (int myIdx = startIdx; myIdx < endIdx; myIdx++) {
if (csrColIndA[myIdx] != i_idx)
aij_pj += csrValA[myIdx] * p_old[csrColIndA[myIdx]];
}
Real RHS = B_i[i_idx];
Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * csrValA[startIdx - 1]);
sortedRhoPreMu[i_idx].y = (RHS - aij_pj) / csrValA[startIdx - 1];
// if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0)
// sortedRhoPreMu[i_idx].y = 0;
if (!isfinite(aij_pj)) {
printf("a_ij *p_j became Nan in Calc_Pressure_AXB_USING_CSR ");
}
}
//--------------------------------------------------------------------------------------------------------------------------------
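// Matrix-free pressure update (the MATRIX_FREE path). Fluid markers get one Jacobi-style
// IISPH update built from a_ii, d_ii and dij_pj; boundary markers instead get the ADAMI
// extrapolated pressure and the ghost velocity V_new.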
__global__ void Calc_Pressure(Real* a_ii, // Read
Real3* d_ii, // Read
Real3* dij_pj, // Read
Real* rho_np, // Read
Real* rho_p, // Write
Real* Residuals,
Real3* F_p,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes,
int4 updatePortion,
uint* gridMarkerIndexD,
Real* p_old,
Real3* V_new,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real RHO_0 = paramsD.rho0;
Real dT = delta_t;
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
if (sortedRhoPreMu[i_idx].x < EPSILON) {
printf("(Calc_Pressure)My density is %f in Calc_Pressure\n", sortedRhoPreMu[i_idx].x);
}
int myType = sortedRhoPreMu[i_idx].w;
Real Rho_i = sortedRhoPreMu[i_idx].x;
Real p_i = p_old[i_idx];
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real p_new = 0;
Real my_rho_p = 0;
Real3 F_i_p = F_p[i_idx];
if (myType == -1) {
if (Rho_i < 0.999 * RHO_0) {
p_new = 0;
Residuals[i_idx] = 0;
} else {
Real3 my_dij_pj = dij_pj[i_idx];
Real sum_dij_pj = 0; // This is the first summation term in the expression for the pressure.
Real sum_djj_pj = 0; // This is the second summation term in the expression for the pressure.
Real sum_djk_pk = 0; // This is the last summation term in the expression for the pressure.
int3 gridPosI = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPosI = gridPosI + mI3(x, y, z);
uint gridHashI = calcGridHash(neighbourPosI);
// get start of bucket for this cell
uint startIndexI = cellStart[gridHashI];
if (startIndexI != 0xffffffff) {
uint endIndexI = cellEnd[gridHashI];
for (uint j = startIndexI; j < endIndexI; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3ij = Distance(pos_i, pos_j);
Real dij = length(dist3ij);
if (dij > RESOLUTION_LENGTH_MULT * paramsD.HSML || i_idx == j ||
sortedRhoPreMu[j].w <= -2)
continue;
// Real Rho_j = sortedRhoPreMu[j].x;
Real p_j_old = p_old[j];
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real3 djj = d_ii[j];
Real3 F_j_p = F_p[j];
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3ij, h_ij);
Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij);
Real3 djk_pk = dij_pj[j] - d_ji * p_i;
sum_dij_pj += m_j * dot(my_dij_pj, grad_i_wij);
sum_djj_pj += m_j * dot(djj, grad_i_wij) * p_j_old;
sum_djk_pk += m_j * dot(djk_pk, grad_i_wij);
my_rho_p += (dT * dT) * m_j * dot((F_i_p / m_i - F_j_p / m_j), grad_i_wij);
}
}
}
}
}
// Real RHS = fminf(0.0, RHO_0 - rho_np[i_idx]);
Real RHS = RHO_0 - rho_np[i_idx];
Real aij_pj = +sum_dij_pj - sum_djj_pj - sum_djk_pk;
p_new = (RHS - aij_pj) / a_ii[i_idx];
Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * a_ii[i_idx]);
// sortedRhoPreMu[i_idx].x = aij_pj + p_new * a_ii[i_idx] + RHO_0 - RHS;
}
} else { // Do Adami BC
Real3 myAcc = mR3(0);
Real3 V_prescribed = mR3(0);
BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD,
rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D,
accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D,
acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodes, ShellelementsNodes);
Real3 numeratorv = mR3(0);
Real denumenator = 0;
Real numeratorp = 0;
Real3 Vel_i;
// get address in grid
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML || sortedRhoPreMu[j].w != -1)
continue;
// NOTE: the OLD velocity should be used here, not the new one!
Real3 Vel_j = sortedVelMas[j];
Real p_j = p_old[j];
Real3 F_j_p = F_p[j];
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
// Real rhoj = sortedRhoPreMu[j].x;
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = W3h(d, h_ij);
numeratorv += Vel_j * Wd;
numeratorp += p_j * Wd + dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd;
denumenator += Wd;
Real3 TobeUsed = (F_i_p / m_i - F_j_p / m_j);
my_rho_p += (dT * dT) * m_j * dot(TobeUsed, GradWh(dist3, h_ij));
if (isnan(numeratorp))
printf("Something is wrong here..., %f\n", numeratorp);
}
}
}
}
}
if (abs(denumenator) < EPSILON) {
p_new = 0;
Vel_i = 2 * V_prescribed;
} else {
Vel_i = 2 * V_prescribed - numeratorv / denumenator;
p_new = numeratorp / denumenator;
}
Residuals[i_idx] = abs(numeratorp - denumenator * p_old[i_idx]) * a_ii[i_idx];
V_new[i_idx] = Vel_i;
}
// if (paramsD.ClampPressure && p_new < 0.0)
// p_new = 0.0;
rho_p[i_idx] = my_rho_p;
sortedRhoPreMu[i_idx].y = p_new;
}
//--------------------------------------------------------------------------------------------------------------------------------
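// Relax the freshly computed pressure against the previous iterate,
// p = (1 - omega) * p_old + omega * p_new, record |p - p_old| as the residual,
// and roll p_old forward for the next iteration.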
__global__ void Update_AND_Calc_Res(Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* p_old,
Real3* V_new,
Real* rho_p,
Real* rho_np,
Real* Residuals,
const size_t numAllMarkers,
const int Iteration,
Real params_relaxation,
bool IsSPARSE,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
// p_i = (1 - relax) * p_old_i + relax * p_i;
sortedRhoPreMu[i_idx].y = (1 - params_relaxation) * p_old[i_idx] + params_relaxation * sortedRhoPreMu[i_idx].y;
// if(!paramsD.USE_LinearSolver)
// p_old[i_idx] = sortedRhoPreMu[i_idx].y;
// if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0)
// sortedRhoPreMu[i_idx].y = 0;
// Real AbsRes = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]);
// Real Updated_rho = rho_np[i_idx] + rho_p[i_idx];
// Real rho_res = abs(1000 - sortedRhoPreMu[i_idx].x); // Hard-coded for now
Real p_res = 0;
// p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]) / (abs(p_old[i_idx]) + 0.00001);
p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]);
p_old[i_idx] = sortedRhoPreMu[i_idx].y;
Residuals[i_idx] = p_res;
}
//--------------------------------------------------------------------------------------------------------------------------------
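// With the converged pressure, evaluate the pressure and viscous force contributions,
// the particle-shifting vector r_shift, and a provisional new velocity for each marker;
// the gravity/body-force source term is added to fluid markers only.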
__global__ void CalcForces(Real3* new_vel, // Write
Real4* derivVelRhoD,
Real4* sortedPosRad, // Read
Real3* sortedVelMas, // Read
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real* p_old,
Real3* r_shift,
uint* cellStart,
uint* cellEnd,
Real delta_t,
size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
sortedRhoPreMu[i_idx].x = 0;
sortedRhoPreMu[i_idx].y = 0;
sortedRhoPreMu[i_idx].z = 0;
return;
}
// if (sortedRhoPreMu[i_idx].w > -1) {
// return;
// }
Real mu_0 = paramsD.mu0;
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real dT = delta_t;
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
Real epsilon = paramsD.epsMinMarkersDis;
Real3 posi = mR3(sortedPosRad[i_idx]);
Real3 Veli = sortedVelMas[i_idx];
Real p_i;
// if (sortedRhoPreMu[i_idx].w == -1)
p_i = sortedRhoPreMu[i_idx].y;
// else
// p_i = p_old[i_idx];
Real rho_i = sortedRhoPreMu[i_idx].x;
Real3 F_i_mu = mR3(0);
Real3 F_i_surface_tension = mR3(0);
Real3 F_i_p = mR3(0);
if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w < 0)
printf("too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w);
Real r0 = 0;
int Ni = 0;
Real mi_bar = 0;
Real3 inner_sum = mR3(0);
int3 gridPos = calcGridPos(posi);
// get address in grid
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posj = mR3(sortedPosRad[j]);
Real3 rij = Distance(posi, posj);
Real d = length(rij);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real3 eij = rij / d;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
mi_bar += m_j;
Ni++;
r0 += d;
inner_sum += m_j * rij / (d * d * d);
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = m_j * W3h(d, h_ij);
Real3 grad_ij = GradWh(rij, h_ij);
Real3 Velj = sortedVelMas[j];
Real p_j = sortedRhoPreMu[j].y;
Real rho_j = sortedRhoPreMu[j].x;
Real3 V_ij = (Veli - Velj);
// Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction
if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0))
F_i_p += -m_j * ((p_i / (rho_i * rho_i)) + (p_j / (rho_j * rho_j))) * grad_ij;
Real Rho_bar = (rho_j + rho_i) * 0.5;
// Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar;
// Real3 muNumerator = nu * fminf(0.0, dot(rij, V_ij)) * grad_ij;
Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij;
Real muDenominator = (Rho_bar * Rho_bar) * (d * d + paramsD.HSML * paramsD.HSML * epsilon);
// Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction
if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0))
// if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0))
F_i_mu += m_j * muNumerator / muDenominator;
if (!isfinite(length(F_i_mu))) {
printf("F_i_np in CalcForces returns Nan or Inf");
}
}
}
}
if (Ni != 0) {
r0 /= Ni;
mi_bar /= Ni;
}
if (mi_bar > EPSILON)
r_shift[i_idx] = paramsD.beta_shifting * r0 * r0 * paramsD.v_Max * dT / mi_bar * inner_sum;
// Forces are per unit mass at this point.
derivVelRhoD[i_idx] = mR4((F_i_p + F_i_mu) * m_i);
// Add the source_term only to the fluid markers
if (sortedRhoPreMu[i_idx].w == -1) {
derivVelRhoD[i_idx] = derivVelRhoD[i_idx] + mR4(source_term) * m_i;
}
new_vel[i_idx] = Veli + dT * mR3(derivVelRhoD[i_idx]) / m_i + r_shift[i_idx] / dT;
if (!isfinite(length(new_vel[i_idx])) || !isfinite(length(derivVelRhoD[i_idx])) ||
!isfinite(length(r_shift[i_idx])))
printf("%d= new_vel=%.2f,derivVelRhoD=%.2f,r_shift=%.2f, F_i_p=%f, F_i_mu=%f\n", i_idx,
length(new_vel[i_idx]), length(derivVelRhoD[i_idx]), length(r_shift[i_idx]), length(F_i_p),
length(F_i_mu));
}
}
//--------------------------------------------------------------------------------------------------------------------------------
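// Copy the solved pressure p_old back into the marker state, optionally clamping negative
// values (ClampPressure) and capping at Max_Pressure; non-finite entries are reported
// and zeroed.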
__global__ void FinalizePressure(Real4* sortedPosRad, // Read
Real4* sortedRhoPreMu,
Real* p_old,
Real3* F_p, // Write
uint* cellStart,
uint* cellEnd,
size_t numAllMarkers,
Real p_shift,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
if (!(isfinite(sortedRhoPreMu[i_idx].x) && isfinite(sortedRhoPreMu[i_idx].y) && isfinite(sortedRhoPreMu[i_idx].z) &&
isfinite(sortedRhoPreMu[i_idx].w))) {
printf("rhoPreMu is NAN: thrown from FinalizePressure ! %f,%f,%f\\n", sortedRhoPreMu[i_idx].x,
sortedRhoPreMu[i_idx].y, sortedRhoPreMu[i_idx].z);
sortedRhoPreMu[i_idx].y = 0.0;
}
// if (p_shift < 0)
sortedRhoPreMu[i_idx].y = p_old[i_idx] + ((paramsD.ClampPressure) ? paramsD.BASEPRES : 0.0); //- p_shift;
if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0)
sortedRhoPreMu[i_idx].y = 0;
// if (sortedRhoPreMu[i_idx].y < 0)
// sortedRhoPreMu[i_idx].y = (p_old[i_idx] > 0) ? p_old[i_idx] : 0.0;
if (sortedRhoPreMu[i_idx].y > paramsD.Max_Pressure)
sortedRhoPreMu[i_idx].y = paramsD.Max_Pressure;
}
//--------------------------------------------------------------------------------------------------------------------------------
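// Host-side driver of the IISPH pressure solve: compute d_ii and the intermediate
// velocities, predict the density and a_ii, optionally assemble the pressure system in
// CSR form, then either hand it to the external linear solver or run the Jacobi/SOR
// iteration until the residual or iteration limit is reached, and finally write the
// pressures back through FinalizePressure.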
void ChFsiForceIISPH::calcPressureIISPH(std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
thrust::device_vector<Real3> pos_fsi_fea_D,
thrust::device_vector<Real3> vel_fsi_fea_D,
thrust::device_vector<Real3> acc_fsi_fea_D,
thrust::device_vector<Real> sumWij_inv,
thrust::device_vector<Real>& p_old,
thrust::device_vector<Real3> Normals,
thrust::device_vector<Real> G_i,
thrust::device_vector<Real>& Color) {
// Real RES = paramsH->PPE_res;
PPESolutionType mySolutionType = paramsH->PPE_Solution_type;
std::cout << "time step in calcPressureIISPH " << paramsH->dT << std::endl;
double total_step_timeClock = clock();
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
// thread per particle
uint numThreads, numBlocks;
size_t numAllMarkers = (int)numObjectsH->numAllMarkers;
computeGridSize((uint)numAllMarkers, 256, numBlocks, numThreads);
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
thrust::device_vector<Real3> d_ii(numAllMarkers);
thrust::device_vector<Real3> V_np(numAllMarkers);
thrust::fill(d_ii.begin(), d_ii.end(), mR3(0.0));
thrust::fill(V_np.begin(), V_np.end(), mR3(0.0));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( V_i_np__AND__d_ii_kernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(d_ii), mR3CAST(V_np), R1CAST(sumWij_inv), R1CAST(G_i),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers,
isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n");
}
thrust::device_vector<Real> a_ii(numAllMarkers);
thrust::device_vector<Real> rho_np(numAllMarkers);
thrust::fill(a_ii.begin(), a_ii.end(), 0.0);
thrust::fill(rho_np.begin(), rho_np.end(), 0.0);
thrust::fill(p_old.begin(), p_old.end(), 0.0);
thrust::device_vector<Real3> summGradW(numAllMarkers);
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Rho_np_AND_a_ii_AND_sum_m_GradW), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(rho_np), R1CAST(a_ii),
R1CAST(p_old), mR3CAST(V_np), mR3CAST(d_ii), mR3CAST(summGradW), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n");
}
thrust::device_vector<Real3> V_new(numAllMarkers);
thrust::fill(V_new.begin(), V_new.end(), mR3(0.0));
thrust::device_vector<Real> a_ij;
thrust::device_vector<Real> B_i(numAllMarkers);
thrust::device_vector<uint> csrColIndA;
thrust::device_vector<uint> numContacts(numAllMarkers);
thrust::device_vector<unsigned long int> GlobalcsrColIndA;
thrust::device_vector<Real> csrValA;
double durationFormAXB = 0;
size_t end_fluid = numObjectsH->numGhostMarkers + numObjectsH->numHelperMarkers + numObjectsH->numFluidMarkers;
size_t end_bndry = end_fluid + numObjectsH->numBoundaryMarkers;
size_t end_rigid = end_bndry + numObjectsH->numRigidMarkers;
size_t end_flex = end_rigid + numObjectsH->numFlexMarkers;
int4 updatePortion = mI4((int)end_fluid, (int)end_bndry, (int)end_rigid, (int)end_flex);
uint NNZ;
if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) {
thrust::fill(a_ij.begin(), a_ij.end(), 0.0);
thrust::fill(B_i.begin(), B_i.end(), 0.0);
// thrust::fill(summGradW.begin(), summGradW.end(), mR3(0.0));
thrust::fill(numContacts.begin(), numContacts.end(), 0.0);
//------------------------------------------------------------------------
//------------- MatrixJacobi
//------------------------------------------------------------------------
bool SPARSE_FLAG = true;
double FormAXBClock = clock();
thrust::device_vector<Real> Residuals(numAllMarkers);
thrust::fill(Residuals.begin(), Residuals.end(), 1.0);
thrust::device_vector<Real> rho_p(numAllMarkers);
thrust::fill(rho_p.begin(), rho_p.end(), 0.0);
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( CalcNumber_Contacts), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(numContacts), mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after CalcNumber_Contacts!\n");
}
uint MAX_CONTACT = thrust::reduce(numContacts.begin(), numContacts.end(), 0, thrust::maximum<Real>());
std::cout << "Max contact between SPH particles: " << MAX_CONTACT << std::endl;
uint LastVal = numContacts[numAllMarkers - 1];
thrust::exclusive_scan(numContacts.begin(), numContacts.end(), numContacts.begin());
numContacts.push_back(LastVal + numContacts[numAllMarkers - 1]);
NNZ = numContacts[numAllMarkers];
csrValA.resize(NNZ);
csrColIndA.resize(NNZ);
GlobalcsrColIndA.resize(NNZ);
thrust::fill(csrValA.begin(), csrValA.end(), 0.0);
thrust::fill(GlobalcsrColIndA.begin(), GlobalcsrColIndA.end(), 0.0);
thrust::fill(csrColIndA.begin(), csrColIndA.end(), 0.0);
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
std::cout << "updatePortion of BC: " << updatePortion.x << " " << updatePortion.y << " " << updatePortion.z
<< " " << updatePortion.w << "\n ";
hipLaunchKernelGGL(( FormAXB), dim3(numBlocks), dim3(numThreads), 0, 0,
R1CAST(csrValA), U1CAST(csrColIndA), LU1CAST(GlobalcsrColIndA), U1CAST(numContacts), R1CAST(a_ij),
R1CAST(B_i), mR3CAST(d_ii), R1CAST(a_ii), mR3CAST(summGradW), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(V_new), R1CAST(p_old),
mR3CAST(Normals), R1CAST(G_i), R1CAST(sumWij_inv), R1CAST(rho_np),
mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D),
mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD),
mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D),
U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D,
U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes),
updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, SPARSE_FLAG, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n");
}
durationFormAXB = (clock() - FormAXBClock) / (double)CLOCKS_PER_SEC;
}
//------------------------------------------------------------------------
//------------- Iterative loop
//------------------------------------------------------------------------
int Iteration = 0;
Real MaxRes = 100;
thrust::device_vector<Real> Residuals(numAllMarkers);
thrust::fill(Residuals.begin(), Residuals.end(), 1.0);
thrust::device_vector<Real3> dij_pj(numAllMarkers);
thrust::fill(dij_pj.begin(), dij_pj.end(), mR3(0.0));
thrust::device_vector<Real3> F_p(numAllMarkers);
thrust::fill(F_p.begin(), F_p.end(), mR3(0.0));
thrust::device_vector<Real> rho_p(numAllMarkers);
thrust::fill(rho_p.begin(), rho_p.end(), 0.0);
double LinearSystemClock = clock();
myLinearSolver->SetVerbose(paramsH->Verbose_monitoring);
myLinearSolver->SetAbsRes(paramsH->LinearSolver_Abs_Tol);
myLinearSolver->SetRelRes(paramsH->LinearSolver_Rel_Tol);
myLinearSolver->SetIterationLimit(paramsH->LinearSolver_Max_Iter);
if (paramsH->USE_LinearSolver) {
if (paramsH->PPE_Solution_type != PPESolutionType::FORM_SPARSE_MATRIX) {
printf(
"You should set paramsH->PPE_Solution_type to FORM_SPARSE_MATRIX in order to use the "
"chrono_fsi linear solvers\n");
exit(0);
}
myLinearSolver->Solve((int)numAllMarkers, NNZ, R1CAST(csrValA), U1CAST(numContacts), U1CAST(csrColIndA),
R1CAST(p_old), R1CAST(B_i));
cudaCheckError();
} else {
while ((MaxRes > paramsH->LinearSolver_Abs_Tol || Iteration < 3) &&
Iteration < paramsH->LinearSolver_Max_Iter) {
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Initialize_Variables), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old),
mR3CAST(sortedSphMarkersD->velMasD), mR3CAST(V_new),
numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Initialize_Variables!\n");
}
if (mySolutionType == PPESolutionType::MATRIX_FREE) {
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Calc_dij_pj), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(dij_pj), mR3CAST(F_p), mR3CAST(d_ii), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT,
numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Calc_dij_pj!\n");
}
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Calc_Pressure), dim3(numBlocks), dim3(numThreads), 0, 0,
R1CAST(a_ii), mR3CAST(d_ii), mR3CAST(dij_pj), R1CAST(rho_np), R1CAST(rho_p), R1CAST(Residuals),
mR3CAST(F_p), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D),
mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD),
mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D),
U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D,
U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes),
updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), R1CAST(p_old), mR3CAST(V_new),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT,
numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Calc_Pressure!\n");
}
}
if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) {
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Calc_Pressure_AXB_USING_CSR), dim3(numBlocks), dim3(numThreads), 0, 0,
R1CAST(csrValA), R1CAST(a_ii), U1CAST(csrColIndA), U1CAST(numContacts),
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(sumWij_inv), mR3CAST(sortedSphMarkersD->velMasD),
mR3CAST(V_new), R1CAST(p_old), R1CAST(B_i), R1CAST(Residuals), numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Iterative_pressure_update!\n");
}
}
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Update_AND_Calc_Res), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old),
mR3CAST(V_new), R1CAST(rho_p), R1CAST(rho_np), R1CAST(Residuals), numAllMarkers, Iteration,
paramsH->PPE_relaxation, false, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Iterative_pressure_update!\n");
}
Iteration++;
thrust::device_vector<Real>::iterator iter = thrust::max_element(Residuals.begin(), Residuals.end());
auto position = iter - Residuals.begin();
MaxRes = *iter;
// MaxRes =
// thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) /
// numObjectsH->numAllMarkers;
// Real PMAX = thrust::reduce(p_old.begin(), p_old.end(), 0.0, thrust::maximum<Real>());
// MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) /
// numObjectsH->numAllMarkers;
// MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::maximum<Real>());
// Real R_np = thrust::reduce(rho_np.begin(), rho_np.end(), 0.0, thrust::plus<Real>()) /
// rho_np.size();
// Real R_p = thrust::reduce(rho_p.begin(), rho_p.end(), 0.0, thrust::plus<Real>()) /
// rho_p.size();
//
if (paramsH->Verbose_monitoring)
printf("Iter= %d, Res= %f\n", Iteration, MaxRes);
}
}
thrust::device_vector<Real>::iterator iter = thrust::min_element(p_old.begin(), p_old.end());
auto position = iter - p_old.begin();
Real shift_p = *iter;
// Real shift_p = 0;
// This must be run if linear solver is used
if (paramsH->USE_LinearSolver || paramsH->ClampPressure) {
printf("Shifting pressure values by %f\n", -shift_p);
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( FinalizePressure), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(F_p),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, shift_p,
isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after FinalizePressure!\n");
}
}
double durationLinearSystem = (clock() - LinearSystemClock) / (double)CLOCKS_PER_SEC;
double durationtotal_step_time = (clock() - total_step_timeClock) / (double)CLOCKS_PER_SEC;
printf("---------------IISPH CLOCK-------------------\n");
printf(" Total: %f \n FormAXB: %f\n Linear System: %f \n", durationtotal_step_time, durationFormAXB,
durationLinearSystem);
if (!paramsH->USE_LinearSolver)
printf(" Iter (Jacobi+SOR)# = %d, to Res= %.3e \n", Iteration, MaxRes);
if (paramsH->USE_LinearSolver)
if (myLinearSolver->GetSolverStatus()) {
std::cout << " Solver converged to " << myLinearSolver->GetResidual() << " tolerance";
std::cout << " after " << myLinearSolver->GetNumIterations() << " iterations" << std::endl;
} else {
std::cout << "Failed to converge after " << myLinearSolver->GetIterationLimit() << " iterations";
std::cout << " (" << myLinearSolver->GetResidual() << " final residual)" << std::endl;
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
hipFree(isErrorD);
free(isErrorH);
}
void ChFsiForceIISPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD,
std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
std::shared_ptr<FsiMeshDataD> otherFsiMeshD) {
sphMarkersD = otherSphMarkersD;
int numAllMarkers = (int)numObjectsH->numAllMarkers;
int numHelperMarkers = (int)numObjectsH->numHelperMarkers;
fsiCollisionSystem->ArrangeData(sphMarkersD);
thrust::device_vector<Real3>::iterator iter =
thrust::max_element(sortedSphMarkersD->velMasD.begin(), sortedSphMarkersD->velMasD.end(), compare_Real3_mag());
Real MaxVel = length(*iter);
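// A sketch of the adaptive time-step limits evaluated in the block below (variables as used there):
// dt_CFL = Co_number * HSML / MaxVel (advective/CFL limit)
// dt_nu = 0.25 * HSML^2 / (mu0 / rho0) (viscous diffusion limit)
// dt_body = 0.25 * sqrt(HSML / |bodyForce3 + gravity|) (body-force limit)
// The step actually used is min(dt_body, dt_CFL, dt_nu), capped by dT_Max.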
if (paramsH->Adaptive_time_stepping) {
Real dt_CFL = paramsH->Co_number * paramsH->HSML / MaxVel;
Real dt_nu = 0.25 * paramsH->HSML * paramsH->HSML / (paramsH->mu0 / paramsH->rho0);
Real dt_body = 0.25 * std::sqrt(paramsH->HSML / length(paramsH->bodyForce3 + paramsH->gravity));
Real dt = std::fmin(dt_body, std::fmin(dt_CFL, dt_nu));
if (dt / paramsH->dT_Max > 0.7 && dt / paramsH->dT_Max < 1)
paramsH->dT = paramsH->dT_Max * 0.5;
else
paramsH->dT = std::fmin(dt, paramsH->dT_Max);
CopyParams_NumberOfObjects(paramsH, numObjectsH);
printf(" time step=%.3e, dt_Max=%.3e, dt_CFL=%.3e (CFL=%.2g), dt_nu=%.3e, dt_body=%.3e\n", paramsH->dT,
paramsH->dT_Max, dt_CFL, paramsH->Co_number, dt_nu, dt_body);
}
bool *isErrorH, *isErrorD, *isErrorD2;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
hipMalloc((void**)&isErrorD2, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipMemcpy(isErrorD2, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
uint numThreads, numBlocks;
computeGridSize(numAllMarkers, 256, numBlocks, numThreads);
printf("numBlocks: %d, numThreads: %d, numAllMarker:%d \n", numBlocks, numThreads, numAllMarkers);
thrust::device_vector<Real> Color(numAllMarkers);
thrust::fill(Color.begin(), Color.end(), 1.0e10);
thrust::device_vector<Real> _sumWij_inv(numAllMarkers);
thrust::fill(_sumWij_inv.begin(), _sumWij_inv.end(), 0.0);
thrust::device_vector<Real> G_i(numAllMarkers * 9);
thrust::fill(G_i.begin(), G_i.end(), 0);
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
thrust::device_vector<uint> Contact_i(numAllMarkers);
thrust::fill(Contact_i.begin(), Contact_i.end(), 0);
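// The next two kernels rebuild the density field on the sorted markers: roughly, calcRho_kernel
// accumulates the kernel-sum normalization factors (sumWij_inv) and per-marker neighbor counts
// (Contact_i), and calcNormalizedRho_kernel then computes the normalized density, the
// kernel-gradient correction tensor G_i, the surface normals and the color field.
// See ChSphGeneral.cuh for the exact definitions.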
hipLaunchKernelGGL(( calcRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), U1CAST(Contact_i), numAllMarkers,
isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after calcRho_kernel!\n");
}
thrust::device_vector<Real3> Normals(numAllMarkers);
hipLaunchKernelGGL(( calcNormalizedRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(G_i), mR3CAST(Normals), R1CAST(Color),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after calcNormalizedRho_kernel!\n");
}
thrust::device_vector<Real> p_old(numAllMarkers, 0.0);
calcPressureIISPH(otherFsiBodiesD, otherFsiMeshD->pos_fsi_fea_D, otherFsiMeshD->vel_fsi_fea_D,
otherFsiMeshD->acc_fsi_fea_D, _sumWij_inv, p_old, Normals, G_i, Color);
//------------------------------------------------------------------------
// thread per particle
// std::cout << "dT in ForceSPH after calcPressure: " << paramsH->dT << "\n";
double CalcForcesClock = clock();
thrust::fill(vel_vis_Sorted_D.begin(), vel_vis_Sorted_D.end(), mR3(0.0));
thrust::fill(derivVelRhoD_Sorted_D.begin(), derivVelRhoD_Sorted_D.end(), mR4(0.0));
thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0));
thrust::device_vector<Real3> dr_shift(numAllMarkers);
thrust::fill(dr_shift.begin(), dr_shift.end(), mR3(0.0));
thrust::device_vector<Real3> NEW_Vel(numAllMarkers, mR3(0.0));
hipLaunchKernelGGL(( CalcForces), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(NEW_Vel), mR4CAST(derivVelRhoD_Sorted_D),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(p_old),
mR3CAST(dr_shift), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in CalcForces!\n");
}
double calcforce = (clock() - CalcForcesClock) / (double)CLOCKS_PER_SEC;
printf(" Force Computation: %f \n", calcforce);
double UpdateClock = clock();
sortedSphMarkersD->velMasD = NEW_Vel;
hipLaunchKernelGGL(( UpdateDensity), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(vel_vis_Sorted_D), mR3CAST(vel_XSPH_Sorted_D), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in CalcForces!\n");
}
CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vis_vel_SPH_D, vel_vis_Sorted_D,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R3(sphMarkersD->velMasD, sortedSphMarkersD->velMasD,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD, sortedSphMarkersD->rhoPresMuD,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R4(fsiGeneralData->derivVelRhoD, derivVelRhoD_Sorted_D,
markersProximityD->gridMarkerIndexD);
printf(" Update information: %f \n", (clock() - UpdateClock) / (double)CLOCKS_PER_SEC);
printf("----------------------------------------------\n");
}
} // namespace fsi
} // namespace chrono
| 61b2f301e056a658f5ee8b0d1ac3aeaff7060347.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha
// =============================================================================
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChFsiForceIISPH.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
#define RESOLUTION_LENGTH_MULT_IISPH 2.0
//==========================================================================================================================================
namespace chrono {
namespace fsi {
// double precision atomic add function
__device__ inline double datomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
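// Minimal usage sketch (hypothetical names): inside a kernel each thread may call
// datomicAdd(&d_sum[0], myLocalValue);
// to accumulate double-precision values via compare-and-swap on devices without a
// native double-precision atomicAdd.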
ChFsiForceIISPH::ChFsiForceIISPH(std::shared_ptr<ChBce> otherBceWorker,
std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
bool verb)
: ChFsiForce(otherBceWorker,
otherSortedSphMarkersD,
otherMarkersProximityD,
otherFsiGeneralData,
otherParamsH,
otherNumObjects,
verb) {}
ChFsiForceIISPH::~ChFsiForceIISPH() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceIISPH::Initialize() {
ChFsiForce::Initialize();
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
cudaDeviceSynchronize();
}
//--------------------------------------------------------------------------------------------------------------------------------
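// Computes, per marker i, the IISPH helper term d_ii and the advected velocity, roughly:
// d_ii = sum_j m_j * (-dT^2 / rho_i^2) * grad W_ij
// V_i_np = v_i + dT * F_adv_i, where F_adv_i collects the viscous, kappa surface-tension
// and body-force contributions (pressure excluded).
// A sketch of the IISPH predictor step; see the kernel body (and its note on the mass factor)
// for the exact terms used.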
__global__ void V_i_np__AND__d_ii_kernel(Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* d_ii,
Real3* V_i_np,
Real* sumWij_inv,
Real* G_tensor,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) {
return;
}
// sortedRhoPreMu[i_idx].x = sortedRhoPreMu[i_idx].x / sumWij_inv[i_idx];
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real mu_0 = paramsD.mu0;
Real epsilon = paramsD.epsMinMarkersDis;
Real dT = delta_t;
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
Real RHO_0 = paramsD.rho0;
if (sortedRhoPreMu[i_idx].x < EPSILON) {
printf("density is %f,ref density= %f\n", sortedRhoPreMu[i_idx].x, RHO_0);
}
Real3 posi = mR3(sortedPosRad[i_idx]);
Real3 Veli = sortedVelMas[i_idx];
Real Rhoi = sortedRhoPreMu[i_idx].x;
Real3 My_d_ii = mR3(0);
Real3 My_F_i_np = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posi);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posj = mR3(sortedPosRad[j]);
Real3 rij = Distance(posi, posj);
Real d = length(rij);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real3 eij = rij / d;
Real3 Velj = sortedVelMas[j];
Real Rhoj = sortedRhoPreMu[j].x;
Real h_j = sortedPosRad[j].w;
if (Rhoj == 0) {
printf("Bug F_i_np__AND__d_ii_kernel i=%d j=%d, hi=%f, hj=%f\n", i_idx, j, h_i, h_j);
}
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
My_d_ii += m_j * (-(dT * dT) / (Rhoi * Rhoi)) * grad_ij;
Real Rho_bar = (Rhoj + Rhoi) * 0.5;
Real3 V_ij = (Veli - Velj);
// Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar;
// Real3 muNumerator = nu * fmin(0.0, dot(rij, V_ij)) * grad_ij;
Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij;
Real muDenominator = (Rho_bar * Rho_bar) * (d * d + h_ij * h_ij * epsilon);
// if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0))
// if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 &&
// sortedRhoPreMu[j].w < 0))
My_F_i_np += m_j * muNumerator / muDenominator;
Real Wd = W3h(d, h_ij);
My_F_i_np -= paramsD.kappa / m_i * m_j * Wd * rij;
}
}
}
}
}
// if (!paramsD.Conservative_Form)
// My_F_i_np = mu_0 * LaplacainVi;
My_F_i_np *= m_i;
My_F_i_np += m_i * source_term;
d_ii[i_idx] = My_d_ii;
V_i_np[i_idx] = (My_F_i_np * dT + Veli); // This does not contain m_0?
}
//--------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------
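// Predicts the advected density and the diagonal of the pressure system, roughly:
// rho_np_i = rho_i + dT * sum_j m_j (V_np_i - V_np_j) . grad W_ij
// a_ii = sum_j m_j (d_ii - d_ji) . grad W_ij (clamped away from zero below)
// It also caches sum_j m_j grad W_ij and initializes p_old from the current pressure.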
__global__ void Rho_np_AND_a_ii_AND_sum_m_GradW(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real* rho_np, // Write
Real* a_ii, // Write
Real* p_old, // Write
Real3* V_np, // Read
Real3* d_ii, // Read
Real3* sum_m_GradW,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 posi = mR3(sortedPosRad[i_idx]);
Real3 Veli_np = V_np[i_idx];
Real Rho_i = sortedRhoPreMu[i_idx].x;
Real3 my_d_ii = d_ii[i_idx];
Real rho_temp = 0;
Real my_a_ii = 0;
Real3 My_sum_m_gradW = mR3(0);
Real dT = delta_t;
// get address in gridj
int3 gridPos = calcGridPos(posi);
//
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posj = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posi, posj);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 Velj_np = V_np[j];
Real3 grad_i_wij = GradWh(dist3, h_ij);
rho_temp += m_j * dot((Veli_np - Velj_np), grad_i_wij);
Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij);
my_a_ii += m_j * dot((my_d_ii - d_ji), grad_i_wij);
My_sum_m_gradW += m_j * grad_i_wij;
}
}
}
}
}
rho_np[i_idx] = dT * rho_temp + sortedRhoPreMu[i_idx].x;
// Note: a_ii can become zero, which can cause divide-by-zero issues for free particles
a_ii[i_idx] = abs(my_a_ii) > EPSILON ? my_a_ii : 1.0;
sum_m_GradW[i_idx] = My_sum_m_gradW;
p_old[i_idx] = sortedRhoPreMu[i_idx].y; // = 1000; // Note that this is outside of the for loop
}
//--------------------------------------------------------------------------------------------------------------------------------
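// Accumulates the off-diagonal pressure term and the current pressure force, roughly:
// dij_pj_i = sum_j m_j * (-dT^2 / rho_j^2) * grad W_ij * p_j_old
// F_p_i = -m_i * sum_j m_j (p_i/rho_i^2 + p_j/rho_j^2) * grad W_ij
// Both depend on p_old and are therefore re-evaluated in every pressure iteration.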
__global__ void Calc_dij_pj(Real3* dij_pj, // write
Real3* F_p, // Write
Real3* d_ii, // Read
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* p_old,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers || sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 my_F_p = mR3(0);
Real p_i_old = p_old[i_idx];
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real Rho_i = sortedRhoPreMu[i_idx].x;
if (sortedRhoPreMu[i_idx].x < EPSILON) {
printf("(Calc_dij_pj) My density is %f in Calc_dij_pj\n", sortedRhoPreMu[i_idx].x);
}
Real dT = delta_t;
Real3 My_dij_pj = mR3(0);
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
////CHECK THIS CONDITION!!!
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3, h_ij);
Real Rho_j = sortedRhoPreMu[j].x;
Real p_j_old = p_old[j];
My_dij_pj += m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij * p_j_old;
my_F_p += m_j * ((p_i_old / (Rho_i * Rho_i)) + (p_j_old / (Rho_j * Rho_j))) * grad_i_wij;
}
}
}
}
}
dij_pj[i_idx] = My_dij_pj;
F_p[i_idx] = -m_i * my_F_p;
}
//--------------------------------------------------------------------------------------------------------------------------------
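// Counts, per marker, the number of non-zeros its row will need in the CSR matrix.
// Fluid rows (-1) also visit neighbors-of-neighbors (the d_jk p_k coupling), BCE rows do not;
// a safety margin of 10 is added at the end. The result feeds the exclusive scan that builds
// the CSR row offsets on the host side.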
__global__ void CalcNumber_Contacts(uint* numContacts,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
numContacts[i_idx] = 1;
return;
}
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
int myType = sortedRhoPreMu[i_idx].w;
Real3 pos_i = mR3(sortedPosRad[i_idx]);
uint numCol[400];
int counter = 1;
numCol[0] = i_idx; // The first one is always the idx of the marker itself
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
bool AlreadyHave = false;
for (uint findCol = 1; findCol <= counter; findCol++) {
if (numCol[findCol] == j) {
AlreadyHave = true;
continue;
}
}
// Room for improvement ...
if (!AlreadyHave) {
numCol[counter] = j;
counter++;
// Do not count BCE-BCE interactions...
if (myType >= 0 && sortedRhoPreMu[j].w >= 0 && paramsD.bceType == BceVersion::ADAMI)
counter--;
}
if (myType != -1) // For BCE no need to go deeper than this...
continue;
Real h_j = sortedPosRad[j].w;
int3 gridPosJ = calcGridPos(pos_j);
for (int zz = -1; zz <= 1; zz++) {
for (int yy = -1; yy <= 1; yy++) {
for (int xx = -1; xx <= 1; xx++) {
int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz);
uint gridHashJ = calcGridHash(neighbourPosJ);
uint startIndexJ = cellStart[gridHashJ];
if (startIndexJ != 0xffffffff) { // cell is not empty
uint endIndexJ = cellEnd[gridHashJ];
for (uint k = startIndexJ; k < endIndexJ; k++) {
Real3 pos_k = mR3(sortedPosRad[k]);
Real3 dist3jk = Distance(pos_j, pos_k);
Real djk = length(dist3jk);
if (djk > RESOLUTION_LENGTH_MULT * h_j || k == j || k == i_idx ||
sortedRhoPreMu[k].w <= -2)
continue;
bool AlreadyHave2 = false;
for (uint findCol = 1; findCol <= counter; findCol++) {
if (numCol[findCol] == k) {
AlreadyHave2 = true;
continue;
}
}
if (!AlreadyHave2) {
numCol[counter] = k;
counter++;
}
}
}
}
}
}
}
}
}
}
}
numContacts[i_idx] = counter + 10;
}
//--------------------------------------------------------------------------------------------------------------------------------
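// Precomputes sum_j m_j grad W_ij for each marker; this sum reappears in the a_ij assembly of
// Calc_fluid_aij_Bi, so it is cached here once per step.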
__global__ void Calc_summGradW(Real3* summGradW, // write
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real3 My_summgradW = mR3(0);
// Real dT = paramsD.dT;
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3, h_ij);
My_summgradW += m_j * grad_i_wij;
}
}
}
}
}
summGradW[i_idx] = My_summgradW;
}
//--------------------------------------------------------------------------------------------------------------------------------
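// Assembles the matrix row and RHS for a boundary (BCE) marker. With the ADAMI option the wall
// pressure is extrapolated from fluid neighbors, roughly
// p_w = [ sum_j p_j W_ij + (g - a_w) . r_ij * rho_j * W_ij ] / sum_j W_ij,
// and the wall velocity is set to 2*V_prescribed - <v_fluid>. Otherwise a normal-gradient
// condition is written into the row. The row is rescaled so its diagonal matches a_ii
// (presumably to keep the system well conditioned).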
__device__ void Calc_BC_aij_Bi(const uint i_idx,
Real* csrValA,
uint* csrColIndA,
unsigned long int* GlobalcsrColIndA,
uint* numContacts,
// The above 4 vectors are used for CSR form.
Real* a_ii, // write
Real* B_i,
Real4* sortedPosRad,
Real3* sortedVelMas,
const Real4* sortedRhoPreMu,
Real3* V_new,
Real* p_old,
Real3* Normals,
Real* G_i,
Real* sumWij_inv,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes,
int4 updatePortion,
uint* gridMarkerIndexD,
const uint* cellStart,
const uint* cellEnd,
const size_t numAllMarkers,
bool IsSPARSE) {
uint csrStartIdx = numContacts[i_idx] + 1;
uint csrEndIdx = numContacts[i_idx + 1];
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real3 my_normal = Normals[i_idx];
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
// if (bceIndex >= numObjectsD.numRigidMarkers) {
// return;
// }
// int Original_idx = gridMarkerIndexD[i_idx];
Real3 myAcc = mR3(0.0);
Real3 V_prescribed = mR3(0.0);
// if (!(sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[i_idx].w <= 3))
// printf("type of marker is %f\n", sortedRhoPreMu[i_idx].w);
BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD, rigidSPH_MeshPos_LRF_D,
posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D,
omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD,
numFlex1D, CableElementsNodes, ShellelementsNodes);
for (int c = csrStartIdx; c < csrEndIdx; c++) {
csrValA[c] = 0;
csrColIndA[c] = i_idx;
GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx;
}
// if ((csrEndIdx - csrStartIdx) != uint(0)) {
Real3 numeratorv = mR3(0);
Real denumenator = 0;
Real pRHS = 0;
// Real Rho_i = sortedRhoPreMu[i_idx].x;
Real3 pos_i = mR3(sortedPosRad[i_idx]);
// get address in grid
int3 gridPos = calcGridPos(pos_i);
uint counter = 0;
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx)
continue;
Real h_j = sortedPosRad[j].w;
// Real m_j = h_j * h_j * h_j * paramsD.rho0;
// Real rhoj = sortedRhoPreMu[j].x;
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = W3h(d, h_ij);
Real3 Vel_j = sortedVelMas[j];
if (paramsD.bceType != BceVersion::ADAMI) {
if (sortedRhoPreMu[j].w == -1.0 || dot(my_normal, mR3(pos_i - pos_j)) > 0) {
Real3 grad_i_wij = GradWh(dist3, h_ij);
csrValA[csrStartIdx - 1] += dot(grad_i_wij, my_normal);
csrValA[counter + csrStartIdx] = -dot(grad_i_wij, my_normal);
csrColIndA[counter + csrStartIdx] = j;
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx;
counter++;
if (sortedRhoPreMu[j].w != -1)
continue;
numeratorv += Vel_j * Wd;
denumenator += Wd;
}
} else {
if (sortedRhoPreMu[j].w != -1 || sortedRhoPreMu[j].w <= -2)
continue;
numeratorv += Vel_j * Wd;
denumenator += Wd;
pRHS += dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd;
csrValA[counter + csrStartIdx] = -Wd;
csrColIndA[counter + csrStartIdx] = j;
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx;
counter++;
}
}
}
}
}
}
if (abs(denumenator) < EPSILON) {
V_new[i_idx] = 2 * V_prescribed;
B_i[i_idx] = 0;
if (paramsD.bceType == BceVersion::ADAMI) {
csrValA[csrStartIdx - 1] = a_ii[i_idx];
csrColIndA[csrStartIdx - 1] = i_idx;
GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx;
}
} else {
Real Scaling = a_ii[i_idx] / denumenator;
V_new[i_idx] = 2 * V_prescribed - numeratorv / denumenator;
if (paramsD.bceType == BceVersion::ADAMI) {
B_i[i_idx] = pRHS;
csrValA[csrStartIdx - 1] = denumenator;
csrColIndA[csrStartIdx - 1] = i_idx;
GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx;
for (int i = csrStartIdx - 1; i < csrEndIdx; i++)
csrValA[i] *= Scaling;
B_i[i_idx] *= Scaling;
}
}
if (paramsD.bceType != BceVersion::ADAMI) {
Real Scaling = a_ii[i_idx];
if (abs(csrValA[csrStartIdx - 1]) > EPSILON) {
Scaling = a_ii[i_idx]; // csrValA[csrStartIdx - 1];
for (int count = csrStartIdx - 1; count < csrEndIdx; count++)
csrValA[count] *= Scaling;
} else {
clearRow(i_idx, csrStartIdx - 1, csrEndIdx, csrValA, B_i);
for (int count = csrStartIdx - 1; count < csrEndIdx; count++) {
int j = csrColIndA[count];
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || j == i_idx) {
csrValA[count] = 0.0;
continue;
}
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = W3h(d, h_ij);
csrValA[count] = sumWij_inv[j] * Wd * Scaling;
}
csrValA[csrStartIdx - 1] -= 1.0 * Scaling;
}
B_i[i_idx] = 0.0 * Scaling;
}
sortedVelMas[i_idx] = V_new[i_idx];
}
//--------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------
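// Assembles the CSR row of the pressure Poisson equation for a fluid marker:
// sum_j a_ij p_j = rho0 - rho_np_i
// with a_ij built from d_ii, d_ij and the cached sum_j m_j grad W_ij. Rows of markers with
// rho_i < 0.999*rho0 are reduced to a_ii * p_i = 0, which effectively clamps their pressure.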
__device__ void Calc_fluid_aij_Bi(const uint i_idx,
Real* csrValA,
uint* csrColIndA,
unsigned long int* GlobalcsrColIndA,
uint* numContacts,
// The above 4 vectors are used for CSR form.
Real* B_i,
Real3* d_ii, // Read
Real* a_ii, // Read
Real* rho_np, // Read
Real3* summGradW,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const int numAllMarkers,
bool IsSPARSE) {
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real dT = delta_t;
int counter = 0; // There is always one non-zero at each row- The marker itself
B_i[i_idx] = paramsD.rho0 - rho_np[i_idx];
uint csrStartIdx = numContacts[i_idx] + 1; // Reserve the starting index for the A_ii
uint csrEndIdx = numContacts[i_idx + 1];
Real h_i = sortedPosRad[i_idx].w;
// Real m_i = h_i * h_i * h_i * paramsD.rho0;
// for (int c = csrStartIdx; c < csrEndIdx; c++) {
// csrValA[c] = a_ii[i_idx];
// csrColIndA[c] = i_idx;
// GlobalcsrColIndA[c] = i_idx + numAllMarkers * i_idx;
// }
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
// Real Rho_i = sortedRhoPreMu[i_idx].x;
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3, h_ij);
Real Rho_j = sortedRhoPreMu[j].x;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real3 d_it = m_j * (-(dT * dT) / (Rho_j * Rho_j)) * grad_i_wij;
Real My_a_ij_1 = m_j * dot(d_it, summGradW[i_idx]);
Real My_a_ij_2 = m_j * dot(d_ii[j], grad_i_wij);
Real My_a_ij_12 = My_a_ij_1 - My_a_ij_2;
bool DONE1 = false;
for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) {
if (csrColIndA[findCol] == j) {
csrValA[findCol] += My_a_ij_12;
csrColIndA[findCol] = j;
GlobalcsrColIndA[findCol] = j + numAllMarkers * i_idx;
DONE1 = true;
continue;
}
}
if (!DONE1) {
csrValA[counter + csrStartIdx] += My_a_ij_12;
csrColIndA[counter + csrStartIdx] = j;
GlobalcsrColIndA[counter + csrStartIdx] = j + numAllMarkers * i_idx;
counter++;
}
int3 gridPosJ = calcGridPos(pos_j);
for (int zz = -1; zz <= 1; zz++) {
for (int yy = -1; yy <= 1; yy++) {
for (int xx = -1; xx <= 1; xx++) {
int3 neighbourPosJ = gridPosJ + mI3(xx, yy, zz);
uint gridHashJ = calcGridHash(neighbourPosJ);
uint startIndexJ = cellStart[gridHashJ];
if (startIndexJ != 0xffffffff) { // cell is not empty
uint endIndexJ = cellEnd[gridHashJ];
for (uint k = startIndexJ; k < endIndexJ; k++) {
Real3 pos_k = mR3(sortedPosRad[k]);
Real3 dist3jk = Distance(pos_j, pos_k);
Real djk = length(dist3jk);
if (djk > RESOLUTION_LENGTH_MULT_IISPH * h_j || k == j || k == i_idx ||
sortedRhoPreMu[k].w <= -2)
continue;
Real h_k = sortedPosRad[k].w;
Real h_jk = 0.5 * (h_j + h_k);
Real3 grad_j_wjk = GradWh(dist3jk, h_jk);
Real m_k = cube(sortedPosRad[k].w) * paramsD.rho0;
Real Rho_k = sortedRhoPreMu[k].x;
Real3 d_jk = m_k * (-(dT * dT) / (Rho_k * Rho_k)) * grad_j_wjk;
Real My_a_ij_3 = m_j * dot(d_jk, grad_i_wij);
bool DONE2 = false;
for (uint findCol = csrStartIdx; findCol < csrEndIdx; findCol++) {
if (csrColIndA[findCol] == k) {
csrValA[findCol] -= My_a_ij_3;
csrColIndA[findCol] = k;
GlobalcsrColIndA[findCol] = k + numAllMarkers * i_idx;
DONE2 = true;
continue;
}
}
if (!DONE2) {
csrValA[counter + csrStartIdx] -= My_a_ij_3;
csrColIndA[counter + csrStartIdx] = k;
GlobalcsrColIndA[counter + csrStartIdx] = k + numAllMarkers * i_idx;
counter++;
}
}
}
}
}
}
}
}
}
}
}
for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) {
if (csrColIndA[myIdx] == i_idx)
csrValA[myIdx] = a_ii[i_idx];
}
csrValA[csrStartIdx - 1] = a_ii[i_idx];
csrColIndA[csrStartIdx - 1] = i_idx;
GlobalcsrColIndA[csrStartIdx - 1] = i_idx + numAllMarkers * i_idx;
if (sortedRhoPreMu[i_idx].x < 0.999 * paramsD.rho0) {
csrValA[csrStartIdx - 1] = a_ii[i_idx];
for (int myIdx = csrStartIdx; myIdx < csrEndIdx; myIdx++) {
csrValA[myIdx] = 0.0;
B_i[i_idx] = 0.0;
}
}
Real RHS = B_i[i_idx];
B_i[i_idx] = RHS; // fminf(0.0, RHS);
}
//--------------------------------------------------------------------------------------------------------------------------------
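// Dispatches the row assembly by marker type: w <= -2 gets an identity row, w == -1 (fluid)
// goes through Calc_fluid_aij_Bi, and w > -1 (BCE) goes through Calc_BC_aij_Bi.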
__global__ void FormAXB(Real* csrValA,
uint* csrColIndA,
unsigned long int* GlobalcsrColIndA,
uint* numContacts,
// The above 4 vectors are used for CSR form.
Real* a_ij, // write
Real* B_i, // write
Real3* d_ii, // Read
Real* a_ii, // Read
Real3* summGradW,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* V_new,
Real* p_old,
Real3* Normals,
Real* G_i,
Real* sumWij_inv,
Real* rho_np,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes,
int4 updatePortion,
uint* gridMarkerIndexD,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
bool IsSPARSE,
volatile bool* isError) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
// Real m_0 = paramsD.markerMass;
// Real RHO_0 = paramsD.rho0;
// Real dT = paramsD.dT;
// Real3 gravity = paramsD.gravity;
int TYPE_OF_MARKER = sortedRhoPreMu[i_idx].w;
if (TYPE_OF_MARKER <= -2) {
B_i[i_idx] = 0;
uint csrStartIdx = numContacts[i_idx];
// This needs to be checked to see if it messes up the condition number of the matrix
csrValA[csrStartIdx] = 1.0;
csrColIndA[csrStartIdx] = i_idx;
GlobalcsrColIndA[csrStartIdx] = i_idx + numAllMarkers * i_idx;
} else if (TYPE_OF_MARKER == -1) {
Calc_fluid_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, B_i, d_ii, a_ii, rho_np, summGradW,
sortedPosRad, sortedRhoPreMu, cellStart, cellEnd, delta_t, numAllMarkers, true);
} else if (TYPE_OF_MARKER > -1)
Calc_BC_aij_Bi(i_idx, csrValA, csrColIndA, GlobalcsrColIndA, numContacts, a_ii, B_i, sortedPosRad, sortedVelMas,
sortedRhoPreMu, V_new, p_old, Normals, G_i, sumWij_inv,
qD, rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D,
omegaVelLRF_fsiBodies_D, accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD,
pos_fsi_fea_D, vel_fsi_fea_D, acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodes,
ShellelementsNodes,
updatePortion, gridMarkerIndexD, cellStart, cellEnd, numAllMarkers, true);
}
//--------------------------------------------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------------------------------------------
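// One Jacobi sweep on the assembled CSR system, roughly
// p_i <- (B_i - sum_{j != i} a_ij * p_old_j) / a_ii,
// with the residual |B_i - sum_j a_ij p_old_j| stored per marker for the convergence check.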
__global__ void Calc_Pressure_AXB_USING_CSR(Real* csrValA,
Real* a_ii,
uint* csrColIndA,
uint* numContacts,
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real3* sortedVelMas,
Real3* V_new,
Real* p_old,
Real* B_i, // Read
Real* Residuals,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
// Real RHO_0 = paramsD.rho0;
// bool ClampPressure = paramsD.ClampPressure;
// Real Max_Pressure = paramsD.Max_Pressure;
uint startIdx = numContacts[i_idx] + 1; // numContacts[i_idx] is the diagonal itself
uint endIdx = numContacts[i_idx + 1];
Real aij_pj = 0;
// Real error = aij_pj + sortedRhoPreMu[i_idx].y * csrValA[startIdx - 1] - B_i[i_idx];
for (int myIdx = startIdx; myIdx < endIdx; myIdx++) {
if (csrColIndA[myIdx] != i_idx)
aij_pj += csrValA[myIdx] * p_old[csrColIndA[myIdx]];
}
Real RHS = B_i[i_idx];
Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * csrValA[startIdx - 1]);
sortedRhoPreMu[i_idx].y = (RHS - aij_pj) / csrValA[startIdx - 1];
// if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0)
// sortedRhoPreMu[i_idx].y = 0;
if (!isfinite(aij_pj)) {
printf("a_ij *p_j became Nan in Calc_Pressure_AXB_USING_CSR ");
}
}
//--------------------------------------------------------------------------------------------------------------------------------
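// Matrix-free variant of the pressure update (used with PPESolutionType::MATRIX_FREE).
// For fluid markers it evaluates, roughly,
// p_i <- [ (rho0 - rho_np_i) - sum_j m_j ( dij_pj_i - d_jj p_j - (dij_pj_j - d_ji p_i) ) . grad W_ij ] / a_ii,
// while BCE markers get the ADAMI-style extrapolated pressure and velocity instead.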
__global__ void Calc_Pressure(Real* a_ii, // Read
Real3* d_ii, // Read
Real3* dij_pj, // Read
Real* rho_np, // Read
Real* rho_p, // Write
Real* Residuals,
Real3* F_p,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real4* qD,
Real3* rigidSPH_MeshPos_LRF_D,
Real3* posRigid_fsiBodies_D,
Real4* velMassRigid_fsiBodies_D,
Real3* omegaVelLRF_fsiBodies_D,
Real3* accRigid_fsiBodies_D,
Real3* omegaAccLRF_fsiBodies_D,
uint* rigidIdentifierD,
Real3* pos_fsi_fea_D,
Real3* vel_fsi_fea_D,
Real3* acc_fsi_fea_D,
uint* FlexIdentifierD,
const int numFlex1D,
uint2* CableElementsNodes,
uint4* ShellelementsNodes,
int4 updatePortion,
uint* gridMarkerIndexD,
Real* p_old,
Real3* V_new,
uint* cellStart,
uint* cellEnd,
Real delta_t,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real RHO_0 = paramsD.rho0;
Real dT = delta_t;
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
if (sortedRhoPreMu[i_idx].x < EPSILON) {
printf("(Calc_Pressure)My density is %f in Calc_Pressure\n", sortedRhoPreMu[i_idx].x);
}
int myType = sortedRhoPreMu[i_idx].w;
Real Rho_i = sortedRhoPreMu[i_idx].x;
Real p_i = p_old[i_idx];
Real3 pos_i = mR3(sortedPosRad[i_idx]);
Real p_new = 0;
Real my_rho_p = 0;
Real3 F_i_p = F_p[i_idx];
if (myType == -1) {
if (Rho_i < 0.999 * RHO_0) {
p_new = 0;
Residuals[i_idx] = 0;
} else {
Real3 my_dij_pj = dij_pj[i_idx];
Real sum_dij_pj = 0; // This is the first summation term in the expression for the pressure.
Real sum_djj_pj = 0; // This is the second summation term in the expression for the pressure.
Real sum_djk_pk = 0; // This is the last summation term in the expression for the pressure.
int3 gridPosI = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPosI = gridPosI + mI3(x, y, z);
uint gridHashI = calcGridHash(neighbourPosI);
// get start of bucket for this cell
uint startIndexI = cellStart[gridHashI];
if (startIndexI != 0xffffffff) {
uint endIndexI = cellEnd[gridHashI];
for (uint j = startIndexI; j < endIndexI; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3ij = Distance(pos_i, pos_j);
Real dij = length(dist3ij);
if (dij > RESOLUTION_LENGTH_MULT * paramsD.HSML || i_idx == j ||
sortedRhoPreMu[j].w <= -2)
continue;
// Real Rho_j = sortedRhoPreMu[j].x;
Real p_j_old = p_old[j];
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
Real3 djj = d_ii[j];
Real3 F_j_p = F_p[j];
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_i_wij = GradWh(dist3ij, h_ij);
Real3 d_ji = m_i * (-(dT * dT) / (Rho_i * Rho_i)) * (-grad_i_wij);
Real3 djk_pk = dij_pj[j] - d_ji * p_i;
sum_dij_pj += m_j * dot(my_dij_pj, grad_i_wij);
sum_djj_pj += m_j * dot(djj, grad_i_wij) * p_j_old;
sum_djk_pk += m_j * dot(djk_pk, grad_i_wij);
my_rho_p += (dT * dT) * m_j * dot((F_i_p / m_i - F_j_p / m_j), grad_i_wij);
}
}
}
}
}
// Real RHS = fminf(0.0, RHO_0 - rho_np[i_idx]);
Real RHS = RHO_0 - rho_np[i_idx];
Real aij_pj = +sum_dij_pj - sum_djj_pj - sum_djk_pk;
p_new = (RHS - aij_pj) / a_ii[i_idx];
Residuals[i_idx] = abs(RHS - aij_pj - p_old[i_idx] * a_ii[i_idx]);
// sortedRhoPreMu[i_idx].x = aij_pj + p_new * a_ii[i_idx] + RHO_0 - RHS;
}
} else { // Do Adami BC
Real3 myAcc = mR3(0);
Real3 V_prescribed = mR3(0);
BCE_Vel_Acc(i_idx, myAcc, V_prescribed, sortedPosRad, updatePortion, gridMarkerIndexD, qD,
rigidSPH_MeshPos_LRF_D, posRigid_fsiBodies_D, velMassRigid_fsiBodies_D, omegaVelLRF_fsiBodies_D,
accRigid_fsiBodies_D, omegaAccLRF_fsiBodies_D, rigidIdentifierD, pos_fsi_fea_D, vel_fsi_fea_D,
acc_fsi_fea_D, FlexIdentifierD, numFlex1D, CableElementsNodes, ShellelementsNodes);
Real3 numeratorv = mR3(0);
Real denumenator = 0;
Real numeratorp = 0;
Real3 Vel_i;
// get address in grid
int3 gridPos = calcGridPos(pos_i);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 pos_j = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(pos_i, pos_j);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML || sortedRhoPreMu[j].w != -1)
continue;
// Note: the OLD velocity should be used here, not the updated one
Real3 Vel_j = sortedVelMas[j];
Real p_j = p_old[j];
Real3 F_j_p = F_p[j];
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
// Real rhoj = sortedRhoPreMu[j].x;
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = W3h(d, h_ij);
numeratorv += Vel_j * Wd;
numeratorp += p_j * Wd + dot(source_term - myAcc, dist3) * sortedRhoPreMu[j].x * Wd;
denumenator += Wd;
Real3 TobeUsed = (F_i_p / m_i - F_j_p / m_j);
my_rho_p += (dT * dT) * m_j * dot(TobeUsed, GradWh(dist3, h_ij));
if (isnan(numeratorp))
printf("Something is wrong here..., %f\n", numeratorp);
}
}
}
}
}
if (abs(denumenator) < EPSILON) {
p_new = 0;
Vel_i = 2 * V_prescribed;
} else {
Vel_i = 2 * V_prescribed - numeratorv / denumenator;
p_new = numeratorp / denumenator;
}
Residuals[i_idx] = abs(numeratorp - denumenator * p_old[i_idx]) * a_ii[i_idx];
V_new[i_idx] = Vel_i;
}
// if (paramsD.ClampPressure && p_new < 0.0)
// p_new = 0.0;
rho_p[i_idx] = my_rho_p;
sortedRhoPreMu[i_idx].y = p_new;
}
//--------------------------------------------------------------------------------------------------------------------------------
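// Relaxed update of the iterate and residual bookkeeping:
// p_i <- (1 - omega) * p_old_i + omega * p_i, Residual_i = |p_i - p_old_i|,
// where omega = paramsH->PPE_relaxation. p_old is then refreshed for the next sweep.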
__global__ void Update_AND_Calc_Res(Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* p_old,
Real3* V_new,
Real* rho_p,
Real* rho_np,
Real* Residuals,
const size_t numAllMarkers,
const int Iteration,
Real params_relaxation,
bool IsSPARSE,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
// p_i = (1 - relax) * p_old_i + relax * p_i;
sortedRhoPreMu[i_idx].y = (1 - params_relaxation) * p_old[i_idx] + params_relaxation * sortedRhoPreMu[i_idx].y;
// if(!paramsD.USE_LinearSolver)
// p_old[i_idx] = sortedRhoPreMu[i_idx].y;
// if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0)
// sortedRhoPreMu[i_idx].y = 0;
// Real AbsRes = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]);
// Real Updated_rho = rho_np[i_idx] + rho_p[i_idx];
// Real rho_res = abs(1000 - sortedRhoPreMu[i_idx].x); // Hard-coded for now
Real p_res = 0;
// p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]) / (abs(p_old[i_idx]) + 0.00001);
p_res = abs(sortedRhoPreMu[i_idx].y - p_old[i_idx]);
p_old[i_idx] = sortedRhoPreMu[i_idx].y;
Residuals[i_idx] = p_res;
}
//--------------------------------------------------------------------------------------------------------------------------------
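// Evaluates the pressure and viscous forces with the converged pressure field, applies the
// particle-shifting displacement r_shift (beta_shifting), and writes the provisional new
// velocity v_i + dT * derivVelRho_i / m_i + r_shift / dT. The body force is added to fluid
// markers only.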
__global__ void CalcForces(Real3* new_vel, // Write
Real4* derivVelRhoD,
Real4* sortedPosRad, // Read
Real3* sortedVelMas, // Read
Real4* sortedRhoPreMu,
Real* sumWij_inv,
Real* p_old,
Real3* r_shift,
uint* cellStart,
uint* cellEnd,
Real delta_t,
size_t numAllMarkers,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
sortedRhoPreMu[i_idx].x = 0;
sortedRhoPreMu[i_idx].y = 0;
sortedRhoPreMu[i_idx].z = 0;
return;
}
// if (sortedRhoPreMu[i_idx].w > -1) {
// return;
// }
Real mu_0 = paramsD.mu0;
Real h_i = sortedPosRad[i_idx].w;
Real m_i = h_i * h_i * h_i * paramsD.rho0;
Real dT = delta_t;
Real3 source_term = paramsD.gravity + paramsD.bodyForce3;
Real epsilon = paramsD.epsMinMarkersDis;
Real3 posi = mR3(sortedPosRad[i_idx]);
Real3 Veli = sortedVelMas[i_idx];
Real p_i;
// if (sortedRhoPreMu[i_idx].w == -1)
p_i = sortedRhoPreMu[i_idx].y;
// else
// p_i = p_old[i_idx];
Real rho_i = sortedRhoPreMu[i_idx].x;
Real3 F_i_mu = mR3(0);
Real3 F_i_surface_tension = mR3(0);
Real3 F_i_p = mR3(0);
if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0) && sortedRhoPreMu[i_idx].w < 0)
printf("too large/small density marker %d, type=%f\n", i_idx, sortedRhoPreMu[i_idx].w);
Real r0 = 0;
int Ni = 0;
Real mi_bar = 0;
Real3 inner_sum = mR3(0);
int3 gridPos = calcGridPos(posi);
// get address in grid
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posj = mR3(sortedPosRad[j]);
Real3 rij = Distance(posi, posj);
Real d = length(rij);
if (d > RESOLUTION_LENGTH_MULT * h_i || sortedRhoPreMu[j].w <= -2 || i_idx == j)
continue;
Real3 eij = rij / d;
Real h_j = sortedPosRad[j].w;
Real m_j = h_j * h_j * h_j * paramsD.rho0;
mi_bar += m_j;
Ni++;
r0 += d;
inner_sum += m_j * rij / (d * d * d);
Real h_ij = 0.5 * (h_j + h_i);
Real Wd = m_j * W3h(d, h_ij);
Real3 grad_ij = GradWh(rij, h_ij);
Real3 Velj = sortedVelMas[j];
Real p_j = sortedRhoPreMu[j].y;
Real rho_j = sortedRhoPreMu[j].x;
Real3 V_ij = (Veli - Velj);
// Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction
if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0))
F_i_p += -m_j * ((p_i / (rho_i * rho_i)) + (p_j / (rho_j * rho_j))) * grad_ij;
Real Rho_bar = (rho_j + rho_i) * 0.5;
// Real nu = mu_0 * paramsD.HSML * 320 / Rho_bar;
// Real3 muNumerator = nu * fminf(0.0, dot(rij, V_ij)) * grad_ij;
Real3 muNumerator = 2 * mu_0 * dot(rij, grad_ij) * V_ij;
Real muDenominator = (Rho_bar * Rho_bar) * (d * d + paramsD.HSML * paramsD.HSML * epsilon);
// Only Consider (fluid-fluid + fluid-solid) or Solid-Fluid Interaction
if (sortedRhoPreMu[i_idx].w < 0 || (sortedRhoPreMu[i_idx].w >= 0 && sortedRhoPreMu[j].w < 0))
// if ((sortedRhoPreMu[i_idx].w < 0 && sortedRhoPreMu[j].w < 0))
F_i_mu += m_j * muNumerator / muDenominator;
if (!isfinite(length(F_i_mu))) {
printf("F_i_np in CalcForces returns Nan or Inf");
}
}
}
}
if (Ni != 0) {
r0 /= Ni;
mi_bar /= Ni;
}
if (mi_bar > EPSILON)
r_shift[i_idx] = paramsD.beta_shifting * r0 * r0 * paramsD.v_Max * dT / mi_bar * inner_sum;
// Forces are per unit mass at this point.
derivVelRhoD[i_idx] = mR4((F_i_p + F_i_mu) * m_i);
// Add the source_term only to the fluid markers
if (sortedRhoPreMu[i_idx].w == -1) {
derivVelRhoD[i_idx] = derivVelRhoD[i_idx] + mR4(source_term) * m_i;
}
new_vel[i_idx] = Veli + dT * mR3(derivVelRhoD[i_idx]) / m_i + r_shift[i_idx] / dT;
if (!isfinite(length(new_vel[i_idx])) || !isfinite(length(derivVelRhoD[i_idx])) ||
!isfinite(length(r_shift[i_idx])))
printf("%d= new_vel=%.2f,derivVelRhoD=%.2f,r_shift=%.2f, F_i_p=%f, F_i_mu=%f\n", i_idx,
length(new_vel[i_idx]), length(derivVelRhoD[i_idx]), length(r_shift[i_idx]), length(F_i_p),
length(F_i_mu));
}
}
//--------------------------------------------------------------------------------------------------------------------------------
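// Copies the converged p_old back into rhoPresMu.y, optionally clamping negative values
// (ClampPressure) and capping at Max_Pressure; the p_shift argument is only used by the
// commented-out shift.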
__global__ void FinalizePressure(Real4* sortedPosRad, // Read
Real4* sortedRhoPreMu,
Real* p_old,
Real3* F_p, // Write
uint* cellStart,
uint* cellEnd,
size_t numAllMarkers,
Real p_shift,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[i_idx].w <= -2) {
return;
}
if (!(isfinite(sortedRhoPreMu[i_idx].x) && isfinite(sortedRhoPreMu[i_idx].y) && isfinite(sortedRhoPreMu[i_idx].z) &&
isfinite(sortedRhoPreMu[i_idx].w))) {
printf("rhoPreMu is NAN: thrown from FinalizePressure ! %f,%f,%f\\n", sortedRhoPreMu[i_idx].x,
sortedRhoPreMu[i_idx].y, sortedRhoPreMu[i_idx].z);
sortedRhoPreMu[i_idx].y = 0.0;
}
// if (p_shift < 0)
sortedRhoPreMu[i_idx].y = p_old[i_idx] + ((paramsD.ClampPressure) ? paramsD.BASEPRES : 0.0); //- p_shift;
if (paramsD.ClampPressure && sortedRhoPreMu[i_idx].y < 0)
sortedRhoPreMu[i_idx].y = 0;
// if (sortedRhoPreMu[i_idx].y < 0)
// sortedRhoPreMu[i_idx].y = (p_old[i_idx] > 0) ? p_old[i_idx] : 0.0;
if (sortedRhoPreMu[i_idx].y > paramsD.Max_Pressure)
sortedRhoPreMu[i_idx].y = paramsD.Max_Pressure;
}
//--------------------------------------------------------------------------------------------------------------------------------
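// Host-side driver for the IISPH pressure solve. A minimal outline of the steps below:
// 1. V_i_np__AND__d_ii_kernel -> d_ii, advected velocity
// 2. Rho_np_AND_a_ii_AND_sum_m_GradW -> rho_np, a_ii, sum_j m_j grad W_ij
// 3. [FORM_SPARSE_MATRIX only] CalcNumber_Contacts + FormAXB -> CSR matrix A and RHS B
// 4. Either hand A, B to the linear solver, or iterate Calc_dij_pj / Calc_Pressure
// (or Calc_Pressure_AXB_USING_CSR) with Update_AND_Calc_Res until MaxRes drops below
// LinearSolver_Abs_Tol or the iteration limit is reached.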
void ChFsiForceIISPH::calcPressureIISPH(std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
thrust::device_vector<Real3> pos_fsi_fea_D,
thrust::device_vector<Real3> vel_fsi_fea_D,
thrust::device_vector<Real3> acc_fsi_fea_D,
thrust::device_vector<Real> sumWij_inv,
thrust::device_vector<Real>& p_old,
thrust::device_vector<Real3> Normals,
thrust::device_vector<Real> G_i,
thrust::device_vector<Real>& Color) {
// Real RES = paramsH->PPE_res;
PPESolutionType mySolutionType = paramsH->PPE_Solution_type;
std::cout << "time step in calcPressureIISPH " << paramsH->dT << std::endl;
double total_step_timeClock = clock();
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
// thread per particle
uint numThreads, numBlocks;
size_t numAllMarkers = (int)numObjectsH->numAllMarkers;
computeGridSize((uint)numAllMarkers, 256, numBlocks, numThreads);
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
thrust::device_vector<Real3> d_ii(numAllMarkers);
thrust::device_vector<Real3> V_np(numAllMarkers);
thrust::fill(d_ii.begin(), d_ii.end(), mR3(0.0));
thrust::fill(V_np.begin(), V_np.end(), mR3(0.0));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
V_i_np__AND__d_ii_kernel<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(d_ii), mR3CAST(V_np), R1CAST(sumWij_inv), R1CAST(G_i),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers,
isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n");
}
thrust::device_vector<Real> a_ii(numAllMarkers);
thrust::device_vector<Real> rho_np(numAllMarkers);
thrust::fill(a_ii.begin(), a_ii.end(), 0.0);
thrust::fill(rho_np.begin(), rho_np.end(), 0.0);
thrust::fill(p_old.begin(), p_old.end(), 0.0);
thrust::device_vector<Real3> summGradW(numAllMarkers);
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Rho_np_AND_a_ii_AND_sum_m_GradW<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(rho_np), R1CAST(a_ii),
R1CAST(p_old), mR3CAST(V_np), mR3CAST(d_ii), mR3CAST(summGradW), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n");
}
thrust::device_vector<Real3> V_new(numAllMarkers);
thrust::fill(V_new.begin(), V_new.end(), mR3(0.0));
thrust::device_vector<Real> a_ij;
thrust::device_vector<Real> B_i(numAllMarkers);
thrust::device_vector<uint> csrColIndA;
thrust::device_vector<uint> numContacts(numAllMarkers);
thrust::device_vector<unsigned long int> GlobalcsrColIndA;
thrust::device_vector<Real> csrValA;
double durationFormAXB;
size_t end_fluid = numObjectsH->numGhostMarkers + numObjectsH->numHelperMarkers + numObjectsH->numFluidMarkers;
size_t end_bndry = end_fluid + numObjectsH->numBoundaryMarkers;
size_t end_rigid = end_bndry + numObjectsH->numRigidMarkers;
size_t end_flex = end_rigid + numObjectsH->numFlexMarkers;
int4 updatePortion = mI4((int)end_fluid, (int)end_bndry, (int)end_rigid, (int)end_flex);
uint NNZ;
if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) {
thrust::fill(a_ij.begin(), a_ij.end(), 0.0);
thrust::fill(B_i.begin(), B_i.end(), 0.0);
// thrust::fill(summGradW.begin(), summGradW.end(), mR3(0.0));
thrust::fill(numContacts.begin(), numContacts.end(), 0.0);
//------------------------------------------------------------------------
//------------- MatrixJacobi
//------------------------------------------------------------------------
bool SPARSE_FLAG = true;
double FormAXBClock = clock();
thrust::device_vector<Real> Residuals(numAllMarkers);
thrust::fill(Residuals.begin(), Residuals.end(), 1.0);
thrust::device_vector<Real> rho_p(numAllMarkers);
thrust::fill(rho_p.begin(), rho_p.end(), 0.0);
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
CalcNumber_Contacts<<<numBlocks, numThreads>>>(
U1CAST(numContacts), mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after CalcNumber_Contacts!\n");
}
uint MAX_CONTACT = thrust::reduce(numContacts.begin(), numContacts.end(), (uint)0, thrust::maximum<uint>());
std::cout << "Max contact between SPH particles: " << MAX_CONTACT << std::endl;
uint LastVal = numContacts[numAllMarkers - 1];
thrust::exclusive_scan(numContacts.begin(), numContacts.end(), numContacts.begin());
numContacts.push_back(LastVal + numContacts[numAllMarkers - 1]);
NNZ = numContacts[numAllMarkers];
csrValA.resize(NNZ);
csrColIndA.resize(NNZ);
GlobalcsrColIndA.resize(NNZ);
thrust::fill(csrValA.begin(), csrValA.end(), 0.0);
thrust::fill(GlobalcsrColIndA.begin(), GlobalcsrColIndA.end(), 0.0);
thrust::fill(csrColIndA.begin(), csrColIndA.end(), 0.0);
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
std::cout << "updatePortion of BC: " << updatePortion.x << " " << updatePortion.y << " " << updatePortion.z
<< " " << updatePortion.w << "\n ";
FormAXB<<<numBlocks, numThreads>>>(
R1CAST(csrValA), U1CAST(csrColIndA), LU1CAST(GlobalcsrColIndA), U1CAST(numContacts), R1CAST(a_ij),
R1CAST(B_i), mR3CAST(d_ii), R1CAST(a_ii), mR3CAST(summGradW), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(V_new), R1CAST(p_old),
mR3CAST(Normals), R1CAST(G_i), R1CAST(sumWij_inv), R1CAST(rho_np),
mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D),
mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD),
mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D),
U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D,
U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes),
updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, SPARSE_FLAG, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after F_i_np__AND__d_ii_kernel!\n");
}
durationFormAXB = (clock() - FormAXBClock) / (double)CLOCKS_PER_SEC;
}
//------------------------------------------------------------------------
//------------- Iterative loop
//------------------------------------------------------------------------
int Iteration = 0;
Real MaxRes = 100;
thrust::device_vector<Real> Residuals(numAllMarkers);
thrust::fill(Residuals.begin(), Residuals.end(), 1.0);
thrust::device_vector<Real3> dij_pj(numAllMarkers);
thrust::fill(dij_pj.begin(), dij_pj.end(), mR3(0.0));
thrust::device_vector<Real3> F_p(numAllMarkers);
thrust::fill(F_p.begin(), F_p.end(), mR3(0.0));
thrust::device_vector<Real> rho_p(numAllMarkers);
thrust::fill(rho_p.begin(), rho_p.end(), 0.0);
double LinearSystemClock = clock();
myLinearSolver->SetVerbose(paramsH->Verbose_monitoring);
myLinearSolver->SetAbsRes(paramsH->LinearSolver_Abs_Tol);
myLinearSolver->SetRelRes(paramsH->LinearSolver_Rel_Tol);
myLinearSolver->SetIterationLimit(paramsH->LinearSolver_Max_Iter);
if (paramsH->USE_LinearSolver) {
if (paramsH->PPE_Solution_type != PPESolutionType::FORM_SPARSE_MATRIX) {
printf(
"You should set paramsH->PPE_Solution_type = FORM_SPARSE_MATRIX in order to use the "
"chrono_fsi linear solvers\n");
exit(0);
}
myLinearSolver->Solve((int)numAllMarkers, NNZ, R1CAST(csrValA), U1CAST(numContacts), U1CAST(csrColIndA),
R1CAST(p_old), R1CAST(B_i));
cudaCheckError();
} else {
while ((MaxRes > paramsH->LinearSolver_Abs_Tol || Iteration < 3) &&
Iteration < paramsH->LinearSolver_Max_Iter) {
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Initialize_Variables<<<numBlocks, numThreads>>>(mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old),
mR3CAST(sortedSphMarkersD->velMasD), mR3CAST(V_new),
numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Initialize_Variables!\n");
}
if (mySolutionType == PPESolutionType::MATRIX_FREE) {
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Calc_dij_pj<<<numBlocks, numThreads>>>(
mR3CAST(dij_pj), mR3CAST(F_p), mR3CAST(d_ii), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT,
numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Calc_dij_pj!\n");
}
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Calc_Pressure<<<numBlocks, numThreads>>>(
R1CAST(a_ii), mR3CAST(d_ii), mR3CAST(dij_pj), R1CAST(rho_np), R1CAST(rho_p), R1CAST(Residuals),
mR3CAST(F_p), mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR4CAST(otherFsiBodiesD->q_fsiBodies_D), mR3CAST(fsiGeneralData->rigidSPH_MeshPos_LRF_D),
mR3CAST(otherFsiBodiesD->posRigid_fsiBodies_D), mR4CAST(otherFsiBodiesD->velMassRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaVelLRF_fsiBodies_D), mR3CAST(otherFsiBodiesD->accRigid_fsiBodies_D),
mR3CAST(otherFsiBodiesD->omegaAccLRF_fsiBodies_D), U1CAST(fsiGeneralData->rigidIdentifierD),
mR3CAST(pos_fsi_fea_D), mR3CAST(vel_fsi_fea_D), mR3CAST(acc_fsi_fea_D),
U1CAST(fsiGeneralData->FlexIdentifierD), (int)numObjectsH->numFlexBodies1D,
U2CAST(fsiGeneralData->CableElementsNodes), U4CAST(fsiGeneralData->ShellElementsNodes),
updatePortion, U1CAST(markersProximityD->gridMarkerIndexD), R1CAST(p_old), mR3CAST(V_new),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), paramsH->dT,
numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Calc_Pressure!\n");
}
}
if (mySolutionType == PPESolutionType::FORM_SPARSE_MATRIX) {
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Calc_Pressure_AXB_USING_CSR<<<numBlocks, numThreads>>>(
R1CAST(csrValA), R1CAST(a_ii), U1CAST(csrColIndA), U1CAST(numContacts),
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(sumWij_inv), mR3CAST(sortedSphMarkersD->velMasD),
mR3CAST(V_new), R1CAST(p_old), R1CAST(B_i), R1CAST(Residuals), numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Iterative_pressure_update!\n");
}
}
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Update_AND_Calc_Res<<<numBlocks, numThreads>>>(
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old),
mR3CAST(V_new), R1CAST(rho_p), R1CAST(rho_np), R1CAST(Residuals), numAllMarkers, Iteration,
paramsH->PPE_relaxation, false, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after Iterative_pressure_update!\n");
}
Iteration++;
thrust::device_vector<Real>::iterator iter = thrust::max_element(Residuals.begin(), Residuals.end());
auto position = iter - Residuals.begin();
MaxRes = *iter;
// MaxRes =
// thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) /
// numObjectsH->numAllMarkers;
// Real PMAX = thrust::reduce(p_old.begin(), p_old.end(), 0.0, thrust::maximum<Real>());
// MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::plus<Real>()) /
// numObjectsH->numAllMarkers;
// MaxRes = thrust::reduce(Residuals.begin(), Residuals.end(), 0.0, thrust::maximum<Real>());
// Real R_np = thrust::reduce(rho_np.begin(), rho_np.end(), 0.0, thrust::plus<Real>()) /
// rho_np.size();
// Real R_p = thrust::reduce(rho_p.begin(), rho_p.end(), 0.0, thrust::plus<Real>()) /
// rho_p.size();
//
if (paramsH->Verbose_monitoring)
printf("Iter= %d, Res= %f\n", Iteration, MaxRes);
}
}
thrust::device_vector<Real>::iterator iter = thrust::min_element(p_old.begin(), p_old.end());
auto position = iter - p_old.begin();
Real shift_p = *iter;
// Real shift_p = 0;
// This must be run if the linear solver or pressure clamping is used
if (paramsH->USE_LinearSolver || paramsH->ClampPressure) {
printf("Shifting pressure values by %f\n", -shift_p);
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
FinalizePressure<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(p_old), mR3CAST(F_p),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, shift_p,
isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after FinalizePressure!\n");
}
}
double durationLinearSystem = (clock() - LinearSystemClock) / (double)CLOCKS_PER_SEC;
double durationtotal_step_time = (clock() - total_step_timeClock) / (double)CLOCKS_PER_SEC;
printf("---------------IISPH CLOCK-------------------\n");
printf(" Total: %f \n FormAXB: %f\n Linear System: %f \n", durationtotal_step_time, durationFormAXB,
durationLinearSystem);
if (!paramsH->USE_LinearSolver)
printf(" Iter (Jacobi+SOR)# = %d, to Res= %.3e \n", Iteration, MaxRes);
if (paramsH->USE_LinearSolver)
if (myLinearSolver->GetSolverStatus()) {
std::cout << " Solver converged to " << myLinearSolver->GetResidual() << " tolerance";
std::cout << " after " << myLinearSolver->GetNumIterations() << " iterations" << std::endl;
} else {
std::cout << "Failed to converge after " << myLinearSolver->GetIterationLimit() << " iterations";
std::cout << " (" << myLinearSolver->GetResidual() << " final residual)" << std::endl;
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
cudaFree(isErrorD);
free(isErrorH);
}
void ChFsiForceIISPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD,
std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
std::shared_ptr<FsiMeshDataD> otherFsiMeshD) {
sphMarkersD = otherSphMarkersD;
int numAllMarkers = (int)numObjectsH->numAllMarkers;
int numHelperMarkers = (int)numObjectsH->numHelperMarkers;
fsiCollisionSystem->ArrangeData(sphMarkersD);
thrust::device_vector<Real3>::iterator iter =
thrust::max_element(sortedSphMarkersD->velMasD.begin(), sortedSphMarkersD->velMasD.end(), compare_Real3_mag());
Real MaxVel = length(*iter);
if (paramsH->Adaptive_time_stepping) {
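// Three standard SPH step-size limits are evaluated below: an advective CFL limit
// dt_CFL = Co * h / |v|_max, a viscous diffusion limit dt_nu = 0.25 * h^2 / (mu0/rho0),
// and a body-force limit dt_body = 0.25 * sqrt(h / |bodyForce + gravity|). The smallest of
// the three is taken, capped by dT_Max (with a halving rule when dt lands just below dT_Max).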
Real dt_CFL = paramsH->Co_number * paramsH->HSML / MaxVel;
Real dt_nu = 0.25 * paramsH->HSML * paramsH->HSML / (paramsH->mu0 / paramsH->rho0);
Real dt_body = 0.25 * std::sqrt(paramsH->HSML / length(paramsH->bodyForce3 + paramsH->gravity));
Real dt = std::fmin(dt_body, std::fmin(dt_CFL, dt_nu));
if (dt / paramsH->dT_Max > 0.7 && dt / paramsH->dT_Max < 1)
paramsH->dT = paramsH->dT_Max * 0.5;
else
paramsH->dT = std::fmin(dt, paramsH->dT_Max);
CopyParams_NumberOfObjects(paramsH, numObjectsH);
printf(" time step=%.3e, dt_Max=%.3e, dt_CFL=%.3e (CFL=%.2g), dt_nu=%.3e, dt_body=%.3e\n", paramsH->dT,
paramsH->dT_Max, dt_CFL, paramsH->Co_number, dt_nu, dt_body);
}
bool *isErrorH, *isErrorD, *isErrorD2;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
cudaMalloc((void**)&isErrorD2, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
cudaMemcpy(isErrorD2, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
uint numThreads, numBlocks;
computeGridSize(numAllMarkers, 256, numBlocks, numThreads);
printf("numBlocks: %d, numThreads: %d, numAllMarker:%d \n", numBlocks, numThreads, numAllMarkers);
thrust::device_vector<Real> Color(numAllMarkers);
thrust::fill(Color.begin(), Color.end(), 1.0e10);
thrust::device_vector<Real> _sumWij_inv(numAllMarkers);
thrust::fill(_sumWij_inv.begin(), _sumWij_inv.end(), 0.0);
thrust::device_vector<Real> G_i(numAllMarkers * 9);
thrust::fill(G_i.begin(), G_i.end(), 0);
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
thrust::device_vector<uint> Contact_i(numAllMarkers);
thrust::fill(Contact_i.begin(), Contact_i.end(), 0);
calcRho_kernel<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), U1CAST(Contact_i), numAllMarkers,
isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after calcRho_kernel!\n");
}
thrust::device_vector<Real3> Normals(numAllMarkers);
calcNormalizedRho_kernel<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(G_i), mR3CAST(Normals), R1CAST(Color),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed after calcNormalizedRho_kernel!\n");
}
thrust::device_vector<Real> p_old(numAllMarkers, 0.0);
calcPressureIISPH(otherFsiBodiesD, otherFsiMeshD->pos_fsi_fea_D, otherFsiMeshD->vel_fsi_fea_D,
otherFsiMeshD->acc_fsi_fea_D, _sumWij_inv, p_old, Normals, G_i, Color);
//------------------------------------------------------------------------
// thread per particle
// std::cout << "dT in ForceSPH after calcPressure: " << paramsH->dT << "\n";
double CalcForcesClock = clock();
thrust::fill(vel_vis_Sorted_D.begin(), vel_vis_Sorted_D.end(), mR3(0.0));
thrust::fill(derivVelRhoD_Sorted_D.begin(), derivVelRhoD_Sorted_D.end(), mR4(0.0));
thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0));
thrust::device_vector<Real3> dr_shift(numAllMarkers);
thrust::fill(dr_shift.begin(), dr_shift.end(), mR3(0.0));
thrust::device_vector<Real3> NEW_Vel(numAllMarkers, mR3(0.0));
CalcForces<<<numBlocks, numThreads>>>(mR3CAST(NEW_Vel), mR4CAST(derivVelRhoD_Sorted_D),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv), R1CAST(p_old),
mR3CAST(dr_shift), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), paramsH->dT, numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in CalcForces!\n");
}
double calcforce = (clock() - CalcForcesClock) / (double)CLOCKS_PER_SEC;
printf(" Force Computation: %f \n", calcforce);
double UpdateClock = clock();
sortedSphMarkersD->velMasD = NEW_Vel;
UpdateDensity<<<numBlocks, numThreads>>>(
mR3CAST(vel_vis_Sorted_D), mR3CAST(vel_XSPH_Sorted_D), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), R1CAST(_sumWij_inv),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), numAllMarkers, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true) {
throw std::runtime_error("Error! program crashed in CalcForces!\n");
}
CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vis_vel_SPH_D, vel_vis_Sorted_D,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R3(sphMarkersD->velMasD, sortedSphMarkersD->velMasD,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD, sortedSphMarkersD->rhoPresMuD,
markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_NonInvasive_R4(fsiGeneralData->derivVelRhoD, derivVelRhoD_Sorted_D,
markersProximityD->gridMarkerIndexD);
printf(" Update information: %f \n", (clock() - UpdateClock) / (double)CLOCKS_PER_SEC);
printf("----------------------------------------------\n");
}
} // namespace fsi
} // namespace chrono
|
5655ae64712a9dbef369b197ff41c8730c362973.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kmeans_util_sa.h"
#include <time.h>
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#define MAX(X, Y) (((X) > (Y)) ? (X) : (Y))
// #define DEBUG
#ifdef DEBUG
#define DPRINTF(fmt, args...) \
do { \
printf("%s, line %u: " fmt "\r\n", __FUNCTION__, __LINE__ , ##args); \
fflush(stdout); \
} while (0)
#else
#define DPRINTF(fmt, args...) do{}while(0)
#endif
//Constants
__constant__ double sa_temp = 100.0;
__device__ int get_global_tid() {
return (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y +
blockDim.x*threadIdx.y + threadIdx.x;
}
double squared_distance(double* ps, double* center, int dim) {
double sum = 0;
for (int i = 0; i < dim; i++){
double temp = center[i] - ps[i];
sum += temp * temp;
}
return sum;
}
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__
double squared_distance_on_gpu(const double* ps, const double* center, const int n, const int k, const int dim) {
//squared_distance_on_gpu(&dev_points[i], &dev_centers[j], n, k, dim);
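// Layout note: points and centers are stored transposed (structure-of-arrays), so
// coordinate c of point i lives at ps[c*n + i] and coordinate c of center j at
// center[c*k + j] - hence the two different strides (n and k) in the loop below.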
double sum = 0;
for (int i = 0, j=0; i < dim*n; i+=n,j+=k){
double temp = center[j] - ps[i];
sum += temp * temp;
}
return sum;
}
void transpose(double** src, double* dst, int n, int m){
int i, j;
for(i=0; i<n; i++){
for(j=0; j<m; j++){
dst[j*n + i] = src[i][j];
}
}
}
double** create_2D_double_array(int n, int dim) {
double **arr, *temp;
temp = (double *)calloc(n * dim, sizeof(double));
arr = (double **)calloc(n, sizeof(double *));
// Check the allocations before touching them
if (arr == NULL || temp == NULL) {
fprintf(stderr, "Error in allocation!\n");
exit(-1);
}
for (int i = 0 ; i < n; i++)
arr[i] = temp + i * dim;
return arr;
}
void delete_points(double** ps){
free(ps);
ps = NULL;
}
double** init_centers_kpp(double **ps, int n, int k, int dim){
int i;
int curr_k = 0;
int first_i;
double max; // distances are doubles, so max must be a double (an int would truncate)
int max_i;
double *distances_from_centers, *temp_distances;
distances_from_centers = (double*) malloc(sizeof(double)*n);
double **centers = create_2D_double_array(k,dim);
temp_distances = (double*) malloc(sizeof(double)*n);
// Initialize with max double
for (i = 0; i < n; i++)
distances_from_centers[i] = DBL_MAX;
srand(time(NULL));
// Choose a first point
first_i = rand() % n;
DPRINTF("First random index: %d", first_i);
memcpy(centers[curr_k], ps[first_i], dim * sizeof(double));
DPRINTF("Point 1: (%f, %f)", ps[first_i][0], ps[first_i][1]);
DPRINTF("Center 1: (%f, %f)", centers[curr_k][0], centers[curr_k][1]);
while(curr_k < k-1) {
max = -1;
max_i = -1;
for(i=0; i<n; i++){
DPRINTF("New squared_distance: %f and old min squared_distance: %f", squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]);
temp_distances[i] = MIN(squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]);
if(temp_distances[i] > max){
max = temp_distances[i];
max_i = i;
}
}
memcpy(distances_from_centers, temp_distances, n * sizeof(double));
memcpy(centers[++curr_k], ps[max_i], dim * sizeof(double));
}
free(temp_distances);
free(distances_from_centers);
return centers;
}
__global__
void find_cluster_on_gpu(const double *dev_points, const double *dev_centers,
const int n, const int k, const int dim,
double *result_clusters) {
double min, dist;
int cluster_it_belongs;
const unsigned int index = get_global_tid();
const unsigned int thread_id = threadIdx.x;
extern __shared__ double local_centers[];
const unsigned int start = index;
const unsigned int end = start + 1;
// WARNING: a problem may occur if the block has fewer threads than k*dim
if(thread_id < k*dim){
local_centers[thread_id] = dev_centers[thread_id];
}
__syncthreads();
if (index < n){
for (int i = start; i < end; i++){
min = DBL_MAX;
for (int j = 0; j < k; j++){
result_clusters[j*n + i] = 0.0;
dist = squared_distance_on_gpu(&dev_points[i], &local_centers[j], n, k, dim);
if (min > dist){
//printf("Thread: %3d Found better cluster %d \n", index, j);
min = dist;
cluster_it_belongs = j;
}
// cluster_it_belongs = j*(min > dist) + cluster_it_belongs*(min <= dist);
// min = min*(min <= dist) + dist*(min > dist);
// cluster_it_belongs = cluster_it_belongs ^ ((j ^ cluster_it_belongs) & -(min < dist));
// min = dist ^ ((min ^ dist) & -(min < dist));
}
// Only 1 in the cluster it belongs and everything else 0
// result_clusters[cluster_it_belongs*n + i] = 1.0;
result_clusters[cluster_it_belongs*n + i] = 1.0;
// for (int j = 0; j < k; j++){
// printf("result_clusters[%d][%d] = %lf --> line[%d]\n", j, i, result_clusters[j*n + i], i+2);
// }
}
}
}
__global__
void find_cluster_on_gpu3(const double *dev_points, const double *dev_centers,
const int n, const int k, const int dim,
double *result_clusters, int *result_clusters_old) {
/*
Description
This is the same kernel with the normal find_cluster_on_gpu
It is just augmented in order to populate the result_clusters_old array
which is used by the SAKM kernel.
This kernel is called just once after k++ init via the init_points_clusters
func so I suppose it causes no harm apart from keeping yet another result_clusters
array in GPU Memory
*/
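// Illustrative example (hypothetical values): if point i gets assigned to cluster 2 of
// k = 4, the one-hot column used by the CUBLAS reductions becomes
// result_clusters[{0,1,2,3}*n + i] = {0, 0, 1, 0}, while result_clusters_old[i] = 2 is the
// compact index form later consumed by the SAKM kernel.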
double min, dist;
int cluster_it_belongs;
const unsigned int index = get_global_tid();
const unsigned int thread_id = threadIdx.x;
extern __shared__ double local_centers[];
const unsigned int start = index;
const unsigned int end = start + 1;
// WARNING: a problem may occur if the block has fewer threads than k*dim
if(thread_id < k*dim){
local_centers[thread_id] = dev_centers[thread_id];
}
__syncthreads();
if (index < n){
for (int i = start; i < end; i++){
min = DBL_MAX;
for (int j = 0; j < k; j++){
result_clusters[j*n + i] = 0.0;
dist = squared_distance_on_gpu(&dev_points[i], &local_centers[j], n, k, dim);
if (min > dist){
min = dist;
cluster_it_belongs = j;
}
}
result_clusters[cluster_it_belongs*n + i] = 1.0;
result_clusters_old[i] = cluster_it_belongs;
}
}
}
__global__
void SAGM_perturbation(double *dev_centers,
const int k, const int dim,
hiprandState_t *devStates) {
const unsigned int index = get_global_tid();
if (index < k) {
//The original algorithm chooses one random cluster.
//This is gonna be way too inefficient with a gpu so the algorithm
//is modified and all the centers will be perturbed simultaneously
double delta = 0.0005;
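// Magnitude sketch: delta*(2*u - 1)*1024 with u uniform in (0, 1] perturbs each coordinate
// by a value in roughly (-0.512, +0.512] per call, since 0.0005 * 1024 = 0.512.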
//Perturb Center
//int rand_center = (int) hiprand_uniform(&devStates[index]) * (k-1);
for (int i=0; i<k*dim; i+=k){
double unif = delta*(2.0*hiprand_uniform(&devStates[index]) - 1.0)*1024.0;
double old = dev_centers[index+i];
dev_centers[index + i] += unif;
//printf("Thread: %d modified Center %d from %lf to %lf \n", index, index, old, dev_centers[index + i]);
}
}
}
__global__ void init_RNG(hiprandState_t *devStates, unsigned long seed){
const unsigned int index = get_global_tid();
//Init hiprand for each thread
//printf("Thread ID: %d Setting Seed %ld \n", index, seed);
hiprand_init(seed, index, 0, &devStates[index]);
}
__global__
void SAKM_perturbation(const double *dev_points, const double *dev_centers,
const int n, const int k, const int dim,
double *result_clusters, int *result_clusters_old, hiprandState_t *devStates) {
double min, dist;
int old_cluster, new_cluster;
const unsigned int index = get_global_tid();
const unsigned int thread_id = threadIdx.x;
extern __shared__ double local_centers[];
const unsigned int start = index;
const unsigned int end = start + 1;
if (index < n) {
for (int i = start; i < end; i++){
double d_from_current = 0.0;
//Fetch current cluster
old_cluster = result_clusters_old[i];
d_from_current = squared_distance_on_gpu(&dev_points[i], &dev_centers[old_cluster], n, k, dim);
//Find best candidate
min = DBL_MAX;
new_cluster = old_cluster;
for (int j = 0; j < k; j++){
result_clusters[j*n + i] = 0.0;
if (j == old_cluster) continue;
dist = squared_distance_on_gpu(&dev_points[i], &local_centers[j], n, k, dim);
//printf("Thread: %3d Checking %d %lf vs %d %lf \n", index, old_cluster, d_from_current, j, dist);
if (min - dist > EPS){
min = dist;
new_cluster = j;
}
}
d_from_current = sqrt(d_from_current);
min = sqrt(min);
double prob = exp( -abs(min - d_from_current) * sa_temp );
double unif = hiprand_uniform(&devStates[index]);
//printf("Thread: %3d Old Center: %d %lf New Center %d %lf \n", index, old_cluster, d_from_current, new_cluster, min);
//printf("Thread: %3d Temp : %lf exp(_x) %lf %lf Prob %4.3lf Unif %4.1lf Take Move:%d \n",
// index, 1.0/sa_temp, d_from_current, min, prob, unif, prob > unif);
if (prob > unif) {
result_clusters[new_cluster*n + i] = 1.0;
result_clusters_old[i] = new_cluster;
} else {
//Get back
result_clusters[old_cluster*n + i] = 1.0;
result_clusters_old[i] = old_cluster;
}
}
}
}
__global__
void update_center_on_gpu(const int n, const int k, const int dim,
double* dev_centers,
const double* dev_points_in_cluster){
int i, j;
const unsigned int index = get_global_tid();
const unsigned int start = index;
const unsigned int end = start + 1;
// do all numbers in k*dim threads
if (index < k){
for (i = start; i < end; i++) {
// printf("dev_points_in_cluster[%d] = %d\n", i, (int)dev_points_in_cluster[i]);
// for (j = 0; j < dim; j++){
// printf("dev_centers[%d][%d] = %lf\n", i, j, dev_centers[i*dim + j]);
// }
if (dev_points_in_cluster[i] > 0) {
#pragma unroll
for (j = 0; j < dim; j++){
// FIXME: Two arrays here because of the transposed results of CUBLAS
//dev_temp_centers[i*dim + j] = dev_centers[j*k + i] / (int)dev_points_in_cluster[i];
dev_centers[j*k + i] /= dev_points_in_cluster[i];
}
// printf("Points in cluster: %d, %d\n", index, dev_points_in_cluster[i]);
}
// for (j = 0; j < dim; j++){
// printf("new_dev_centers[%d][%d] = %lf\n", i, j, dev_centers[i*dim + j]);
// }
}
}
}
__global__
void create_dev_ones(double* dev_ones, int n) {
int index = get_global_tid();
if(index < n){
dev_ones[index] = 1.0;
}
}
// Just a wrapper function of create_dev_ones to avoid putting that
// function into kmeans_gpu. (create_dev_ones is used in main)
void call_create_dev_ones(double* dev_ones, int n, dim3 gpu_grid, dim3 gpu_block) {
hipLaunchKernelGGL(( create_dev_ones), dim3(gpu_grid),dim3(gpu_block), 0, 0, dev_ones, n);
hipDeviceSynchronize();
}
void swap(double** src, double** dst){
double *temp = *src;
*src = *dst;
*dst = temp;
}
void swap(double* src, double* dst){
double *temp;
temp = src;
src = dst;
dst = temp;
}
__global__ void sum_distances(double* dev_points, double* dev_centers_of_points, double* dev_points_help, int n, int k, int dim){
int index = get_global_tid();
if(index < n){
double dist = 0;
for (int i = 0; i < dim*n; i+=n){
double temp = dev_centers_of_points[index + i] - dev_points[index + i];
dist += temp * temp;
}
//dist = sqrt(dist);
dev_points_help[index] = dist;
}
}
double evaluate_solution(double* dev_points,
double* dev_centers,
double* dev_points_clusters,
double* dev_centers_of_points,
double* dev_points_help,
int n, int k, int dim,
dim3 gpu_grid, dim3 gpu_block,
//CUBLAS stuff
hipblasHandle_t handle,
hipblasStatus_t stat){
/*
The cost returned from this function is the sum of the squared distances
of the points from their assigned clusters (the sqrt in sum_distances is commented out).
*/
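// Shape sketch of the CUBLAS trick used here: dev_points_clusters is the n x k one-hot
// assignment matrix P and dev_centers the k x dim center matrix C, so the Dgemm below forms
// P * C, an n x dim matrix whose row i holds the coordinates of the center assigned to
// point i; sum_distances then writes each point's squared distance to that center and the
// final Dasum reduces them to the scalar cost.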
double cost = 0.0;
double alpha = 1.0;
double beta = 0.0;
// get assigned center coords for each point
stat = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
n, dim, k,
&alpha,
dev_points_clusters, n,
dev_centers, k,
&beta,
dev_centers_of_points, n);
alpha = -1.0;
//Calculate distances and cost
hipLaunchKernelGGL(( sum_distances), dim3(gpu_grid), dim3(gpu_block), 0, 0, dev_points, dev_centers_of_points, dev_points_help, n, k, dim);
hipDeviceSynchronize();
//Get cost with cublas
stat = hipblasDasum(handle, n, dev_points_help, 1, &cost);
return cost;
}
double kmeans_on_gpu(
double* dev_points,
double* dev_centers,
int n, int k, int dim,
double* dev_points_clusters,
double* dev_points_in_cluster,
double *dev_centers_of_points,
double* dev_new_centers,
int* dev_check,
int BLOCK_SIZE,
//CUBLAS Shit
hipblasHandle_t handle,
hipblasStatus_t stat,
double* dev_ones,
double* dev_points_help,
double* dev_temp_centers) {
double alpha = 1.0, beta = 0.0;
// Calculate grid and block sizes
int grid_size = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 gpu_grid(grid_size, 1);
dim3 gpu_block(BLOCK_SIZE, 1);
// printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y);
// printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y);
// printf("Shared memory size: %ld bytes\n", shmem_size);
// assign points to clusters - step 1
hipLaunchKernelGGL(( find_cluster_on_gpu), dim3(gpu_grid),dim3(gpu_block), k*dim*sizeof(double), 0,
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters);
hipDeviceSynchronize();
// update means - step 2
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
k, dim, n,
&alpha,
dev_points_clusters, n,
dev_points, n,
&beta,
dev_new_centers, k);
// hipDeviceSynchronize();
hipblasDgemv(handle, HIPBLAS_OP_T,
n, k,
&alpha,
dev_points_clusters, n,
dev_ones, 1,
&beta,
dev_points_in_cluster, 1);
// hipDeviceSynchronize();
// Update centers based on counted points
hipLaunchKernelGGL(( update_center_on_gpu), dim3(gpu_grid),dim3(gpu_block), 0, 0,
n, k, dim,
dev_new_centers,
dev_points_in_cluster);
hipDeviceSynchronize();
//Check for convergence with CUBLAS
//dev_new_centers and dev_centers arrays are actually checked for equality
//No distances are calculated separately for each center point.
//It seems like its working smoothly so far
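// Equivalently: icheck = (|| C_old - C_new ||_2 <= EPS). The Daxpy below overwrites
// dev_centers in place with (C_old - C_new) and Dnrm2 then returns the Euclidean norm of
// that difference vector.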
int icheck = 0; //This is used to make it compatible with how the code works now
double check = 0.0;
//First subtract the dev_center arrays
alpha = -1.0;
hipblasDaxpy(handle, k*dim, &alpha, dev_new_centers, 1, dev_centers, 1);
// hipDeviceSynchronize();
//Now find the norm2 of the new_centers
// hipblasSetPointerMode(handle,HIPBLAS_POINTER_MODE_HOST);
hipblasDnrm2(handle, k*dim, dev_centers, 1, &check);
// hipDeviceSynchronize();
if (!(check > EPS)) icheck = 1;
copy_to_gpu(&icheck, dev_check, sizeof(int));
//Update new centers
// TODO: Swap pointers
hipMemcpy(dev_centers, dev_new_centers, sizeof(double)*k*dim, hipMemcpyDeviceToDevice);
return 0.0;
}
void setup_RNG_states(hiprandState_t* devStates, dim3 gpu_grid, dim3 gpu_block){
unsigned long seed = rand();
//printf("Setting Seed to curandgen: %ld\n", seed);
hipLaunchKernelGGL(( init_RNG), dim3(gpu_grid), dim3(gpu_block), 0, 0, devStates, seed);
//printf("SETUP RNG - CUDA Check: %s\n", gpu_get_last_errmsg());
hipDeviceSynchronize();
}
void init_point_clusters(double *dev_points, double *dev_centers,
int n, int k, int dim,
dim3 gpu_grid, dim3 gpu_block,
double *result_clusters, int *result_clusters_old,
hiprandState_t *devStates){
//Init result_cluster arrays to 0
hipMemset(result_clusters, 0x0, n*k*sizeof(double));
hipMemset(result_clusters_old, 0x0, n*sizeof(int));
hipLaunchKernelGGL(( find_cluster_on_gpu3), dim3(gpu_grid), dim3(gpu_block), k*dim*sizeof(double), 0, dev_points, dev_centers,
n, k, dim,
result_clusters, result_clusters_old);
//printf("Init Point Clusters - CUDA Check: %s\n", gpu_get_last_errmsg());
hipDeviceSynchronize();
}
double kmeans_on_gpu_SA(
double* dev_points,
double* dev_centers,
int n, int k, int dim,
double* dev_points_clusters,
int* dev_points_clusters_old,
double* dev_points_in_cluster,
double* dev_centers_of_points,
double* dev_new_centers,
int* dev_check,
dim3 gpu_grid,
dim3 gpu_block,
//CUBLAS Shit
hipblasHandle_t handle,
hipblasStatus_t stat,
double* dev_ones,
double* dev_points_help,
double* dev_temp_centers,
hiprandState_t* devStates,
double temp) {
double alpha = 1.0, beta = 0.0;
//Upload Temperature to constant memory
//temp = 1.0/temp;
//hipMemcpyToSymbol(sa_temp, &temp, sizeof(double), 0, hipMemcpyHostToDevice);
//STEP 1 WITH SAKM
/*
SAKM_perturbation<<<gpu_grid, gpu_block, k*dim*sizeof(double)>>>(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters,
dev_points_clusters_old,
devStates);
//printf("SA Kernel Check: %s\n", gpu_get_last_errmsg());
hipDeviceSynchronize();
*/
dim3 gpu_grid_c((k + 32 - 1)/32, 1);
dim3 gpu_block_c(32, 1);
hipLaunchKernelGGL(( SAGM_perturbation), dim3(gpu_grid_c), dim3(gpu_block_c), 0, 0, dev_centers, k, dim, devStates);
//printf("SAGM Kernel Check: %s\n", gpu_get_last_errmsg());
hipDeviceSynchronize();
// assign points to clusters - step 1
hipLaunchKernelGGL(( find_cluster_on_gpu), dim3(gpu_grid),dim3(gpu_block), k*dim*sizeof(double), 0,
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters);
hipDeviceSynchronize();
// update means - step 2
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
k, dim, n,
&alpha,
dev_points_clusters, n,
dev_points, n,
&beta,
dev_new_centers, k);
hipblasDgemv(handle, HIPBLAS_OP_T,
n, k,
&alpha,
dev_points_clusters, n,
dev_ones, 1,
&beta,
dev_points_in_cluster, 1);
// Update centers based on counted points
hipLaunchKernelGGL(( update_center_on_gpu), dim3(gpu_grid),dim3(gpu_block), 0, 0,
n, k, dim,
dev_new_centers,
dev_points_in_cluster);
hipDeviceSynchronize();
// Evaluate current solution
double cost = evaluate_solution(dev_points, dev_new_centers, dev_points_clusters,
dev_centers_of_points, dev_points_help,
n, k, dim,
gpu_grid, gpu_block,
handle, stat);
//SAGM Paper notes that the cost function is the SSE (sum of squared error)
// In order to calculate the SSE we need the squared distances, which is what
// sum_distances already accumulates (its sqrt is commented out).
/*
//Check for convergence with CUBLAS
double check = 0.0;
//First subtract the dev_center arrays
alpha = -1.0;
hipMemcpy(dev_temp_centers, dev_centers, sizeof(double)*k*dim, hipMemcpyDeviceToDevice);
hipblasDaxpy(handle, k*dim, &alpha, dev_new_centers, 1, dev_temp_centers, 1);
//Now find the norm2 of the new_centers
hipblasDnrm2(handle, k*dim, dev_temp_centers, 1, &check);
*/
return cost;
}
| 5655ae64712a9dbef369b197ff41c8730c362973.cu | #include "kmeans_util_sa.h"
#include <time.h>
#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#define MAX(X, Y) (((X) > (Y)) ? (X) : (Y))
// #define DEBUG
#ifdef DEBUG
#define DPRINTF(fmt, args...) \
do { \
printf("%s, line %u: " fmt "\r\n", __FUNCTION__, __LINE__ , ##args); \
fflush(stdout); \
} while (0)
#else
#define DPRINTF(fmt, args...) do{}while(0)
#endif
//Constants
__constant__ double sa_temp = 100.0;
__device__ int get_global_tid() {
return (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y +
blockDim.x*threadIdx.y + threadIdx.x;
}
double squared_distance(double* ps, double* center, int dim) {
double sum = 0;
for (int i = 0; i < dim; i++){
double temp = center[i] - ps[i];
sum += temp * temp;
}
return sum;
}
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__
double squared_distance_on_gpu(const double* ps, const double* center, const int n, const int k, const int dim) {
//squared_distance_on_gpu(&dev_points[i], &dev_centers[j], n, k, dim);
double sum = 0;
for (int i = 0, j=0; i < dim*n; i+=n,j+=k){
double temp = center[j] - ps[i];
sum += temp * temp;
}
return sum;
}
void transpose(double** src, double* dst, int n, int m){
int i, j;
for(i=0; i<n; i++){
for(j=0; j<m; j++){
dst[j*n + i] = src[i][j];
}
}
}
double** create_2D_double_array(int n, int dim) {
double **arr, *temp;
temp = (double *)calloc(n * dim, sizeof(double));
arr = (double **)calloc(n, sizeof(double *));
// Check the allocations before touching them
if (arr == NULL || temp == NULL) {
fprintf(stderr, "Error in allocation!\n");
exit(-1);
}
for (int i = 0 ; i < n; i++)
arr[i] = temp + i * dim;
return arr;
}
void delete_points(double** ps){
free(ps);
ps = NULL;
}
double** init_centers_kpp(double **ps, int n, int k, int dim){
int i;
int curr_k = 0;
int first_i;
double max; // distances are doubles, so max must be a double (an int would truncate)
int max_i;
double *distances_from_centers, *temp_distances;
distances_from_centers = (double*) malloc(sizeof(double)*n);
double **centers = create_2D_double_array(k,dim);
temp_distances = (double*) malloc(sizeof(double)*n);
// Initialize with max double
for (i = 0; i < n; i++)
distances_from_centers[i] = DBL_MAX;
srand(time(NULL));
// Choose a first point
first_i = rand() % n;
DPRINTF("First random index: %d", first_i);
memcpy(centers[curr_k], ps[first_i], dim * sizeof(double));
DPRINTF("Point 1: (%f, %f)", ps[first_i][0], ps[first_i][1]);
DPRINTF("Center 1: (%f, %f)", centers[curr_k][0], centers[curr_k][1]);
while(curr_k < k-1) {
max = -1;
max_i = -1;
for(i=0; i<n; i++){
DPRINTF("New squared_distance: %f and old min squared_distance: %f", squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]);
temp_distances[i] = MIN(squared_distance(ps[i], centers[curr_k], dim), distances_from_centers[i]);
if(temp_distances[i] > max){
max = temp_distances[i];
max_i = i;
}
}
memcpy(distances_from_centers, temp_distances, n * sizeof(double));
memcpy(centers[++curr_k], ps[max_i], dim * sizeof(double));
}
free(temp_distances);
free(distances_from_centers);
return centers;
}
__global__
void find_cluster_on_gpu(const double *dev_points, const double *dev_centers,
const int n, const int k, const int dim,
double *result_clusters) {
double min, dist;
int cluster_it_belongs;
const unsigned int index = get_global_tid();
const unsigned int thread_id = threadIdx.x;
extern __shared__ double local_centers[];
const unsigned int start = index;
const unsigned int end = start + 1;
// WARNING: a problem may occur if the block has fewer threads than k*dim
if(thread_id < k*dim){
local_centers[thread_id] = dev_centers[thread_id];
}
__syncthreads();
if (index < n){
for (int i = start; i < end; i++){
min = DBL_MAX;
for (int j = 0; j < k; j++){
result_clusters[j*n + i] = 0.0;
dist = squared_distance_on_gpu(&dev_points[i], &local_centers[j], n, k, dim);
if (min > dist){
//printf("Thread: %3d Found better cluster %d \n", index, j);
min = dist;
cluster_it_belongs = j;
}
// cluster_it_belongs = j*(min > dist) + cluster_it_belongs*(min <= dist);
// min = min*(min <= dist) + dist*(min > dist);
// cluster_it_belongs = cluster_it_belongs ^ ((j ^ cluster_it_belongs) & -(min < dist));
// min = dist ^ ((min ^ dist) & -(min < dist));
}
// Only 1 in the cluster it belongs and everything else 0
// result_clusters[cluster_it_belongs*n + i] = 1.0;
result_clusters[cluster_it_belongs*n + i] = 1.0;
// for (int j = 0; j < k; j++){
// printf("result_clusters[%d][%d] = %lf --> line[%d]\n", j, i, result_clusters[j*n + i], i+2);
// }
}
}
}
__global__
void find_cluster_on_gpu3(const double *dev_points, const double *dev_centers,
const int n, const int k, const int dim,
double *result_clusters, int *result_clusters_old) {
/*
Description
This is the same kernel with the normal find_cluster_on_gpu
It is just augmented in order to populate the result_clusters_old array
which is used by the SAKM kernel.
This kernel is called just once after k++ init via the init_points_clusters
func so I suppose it causes no harm apart from keeping yet another result_clusters
array in GPU Memory
*/
double min, dist;
int cluster_it_belongs;
const unsigned int index = get_global_tid();
const unsigned int thread_id = threadIdx.x;
extern __shared__ double local_centers[];
const unsigned int start = index;
const unsigned int end = start + 1;
// WARNING: a problem may occur if the block has fewer threads than k*dim
if(thread_id < k*dim){
local_centers[thread_id] = dev_centers[thread_id];
}
__syncthreads();
if (index < n){
for (int i = start; i < end; i++){
min = DBL_MAX;
for (int j = 0; j < k; j++){
result_clusters[j*n + i] = 0.0;
dist = squared_distance_on_gpu(&dev_points[i], &local_centers[j], n, k, dim);
if (min > dist){
min = dist;
cluster_it_belongs = j;
}
}
result_clusters[cluster_it_belongs*n + i] = 1.0;
result_clusters_old[i] = cluster_it_belongs;
}
}
}
__global__
void SAGM_perturbation(double *dev_centers,
const int k, const int dim,
curandState *devStates) {
const unsigned int index = get_global_tid();
if (index < k) {
//The original algorithm chooses one random cluster.
//This is gonna be way too inefficient with a gpu so the algorithm
//is modified and all the centers will be perturbed simultaneously
double delta = 0.0005;
//Perturb Center
//int rand_center = (int) curand_uniform(&devStates[index]) * (k-1);
for (int i=0; i<k*dim; i+=k){
double unif = delta*(2.0*curand_uniform(&devStates[index]) - 1.0)*1024.0;
double old = dev_centers[index+i];
dev_centers[index + i] += unif;
//printf("Thread: %d modified Center %d from %lf to %lf \n", index, index, old, dev_centers[index + i]);
}
}
}
__global__ void init_RNG(curandState *devStates, unsigned long seed){
const unsigned int index = get_global_tid();
//Init curand for each thread
//printf("Thread ID: %d Setting Seed %ld \n", index, seed);
curand_init(seed, index, 0, &devStates[index]);
}
__global__
void SAKM_perturbation(const double *dev_points, const double *dev_centers,
const int n, const int k, const int dim,
double *result_clusters, int *result_clusters_old, curandState *devStates) {
double min, dist;
int old_cluster, new_cluster;
const unsigned int index = get_global_tid();
const unsigned int thread_id = threadIdx.x;
extern __shared__ double local_centers[];
const unsigned int start = index;
const unsigned int end = start + 1;
if (index < n) {
for (int i = start; i < end; i++){
double d_from_current = 0.0;
//Fetch current cluster
old_cluster = result_clusters_old[i];
d_from_current = squared_distance_on_gpu(&dev_points[i], &dev_centers[old_cluster], n, k, dim);
//Find best candidate
min = DBL_MAX;
new_cluster = old_cluster;
for (int j = 0; j < k; j++){
result_clusters[j*n + i] = 0.0;
if (j == old_cluster) continue;
dist = squared_distance_on_gpu(&dev_points[i], &local_centers[j], n, k, dim);
//printf("Thread: %3d Checking %d %lf vs %d %lf \n", index, old_cluster, d_from_current, j, dist);
if (min - dist > EPS){
min = dist;
new_cluster = j;
}
}
d_from_current = sqrt(d_from_current);
min = sqrt(min);
double prob = exp( -abs(min - d_from_current) * sa_temp );
double unif = curand_uniform(&devStates[index]);
//printf("Thread: %3d Old Center: %d %lf New Center %d %lf \n", index, old_cluster, d_from_current, new_cluster, min);
//printf("Thread: %3d Temp : %lf exp(_x) %lf %lf Prob %4.3lf Unif %4.1lf Take Move:%d \n",
// index, 1.0/sa_temp, d_from_current, min, prob, unif, prob > unif);
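// Simulated-annealing style acceptance: the switch to the closest *other* cluster is taken
// with probability exp(-|d_new - d_old| * sa_temp), otherwise the point keeps its current
// cluster; sa_temp is effectively an inverse temperature here (the debug printf above
// reports 1.0/sa_temp as the temperature).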
if (prob > unif) {
result_clusters[new_cluster*n + i] = 1.0;
result_clusters_old[i] = new_cluster;
} else {
//Get back
result_clusters[old_cluster*n + i] = 1.0;
result_clusters_old[i] = old_cluster;
}
}
}
}
__global__
void update_center_on_gpu(const int n, const int k, const int dim,
double* dev_centers,
const double* dev_points_in_cluster){
int i, j;
const unsigned int index = get_global_tid();
const unsigned int start = index;
const unsigned int end = start + 1;
// do all numbers in k*dim threads
if (index < k){
for (i = start; i < end; i++) {
// printf("dev_points_in_cluster[%d] = %d\n", i, (int)dev_points_in_cluster[i]);
// for (j = 0; j < dim; j++){
// printf("dev_centers[%d][%d] = %lf\n", i, j, dev_centers[i*dim + j]);
// }
if (dev_points_in_cluster[i] > 0) {
#pragma unroll
for (j = 0; j < dim; j++){
// FIXME: Two arrays here because of the transposed reslults of CUBLAS
//dev_temp_centers[i*dim + j] = dev_centers[j*k + i] / (int)dev_points_in_cluster[i];
dev_centers[j*k + i] /= dev_points_in_cluster[i];
}
// printf("Points in cluster: %d, %d\n", index, dev_points_in_cluster[i]);
}
// for (j = 0; j < dim; j++){
// printf("new_dev_centers[%d][%d] = %lf\n", i, j, dev_centers[i*dim + j]);
// }
}
}
}
__global__
void create_dev_ones(double* dev_ones, int n) {
int index = get_global_tid();
if(index < n){
dev_ones[index] = 1.0;
}
}
// Just a wrapper function of create_dev_ones to avoid putting that
// function into kmeans_gpu. (create_dev_ones is used in main)
void call_create_dev_ones(double* dev_ones, int n, dim3 gpu_grid, dim3 gpu_block) {
create_dev_ones<<<gpu_grid,gpu_block>>>(dev_ones, n);
cudaDeviceSynchronize();
}
void swap(double** src, double** dst){
double *temp = *src;
*src = *dst;
*dst = temp;
}
void swap(double* src, double* dst){
double *temp;
temp = src;
src = dst;
dst = temp;
}
__global__ void sum_distances(double* dev_points, double* dev_centers_of_points, double* dev_points_help, int n, int k, int dim){
int index = get_global_tid();
if(index < n){
double dist = 0;
for (int i = 0; i < dim*n; i+=n){
double temp = dev_centers_of_points[index + i] - dev_points[index + i];
dist += temp * temp;
}
//dist = sqrt(dist);
dev_points_help[index] = dist;
}
}
double evaluate_solution(double* dev_points,
double* dev_centers,
double* dev_points_clusters,
double* dev_centers_of_points,
double* dev_points_help,
int n, int k, int dim,
dim3 gpu_grid, dim3 gpu_block,
//CUBLAS stuff
cublasHandle_t handle,
cublasStatus_t stat){
/*
The cost returned from this function is the sum of the squared distances
of the points from their assigned clusters (the sqrt in sum_distances is commented out).
*/
double cost = 0.0;
double alpha = 1.0;
double beta = 0.0;
// get assigned center coords for each point
stat = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
n, dim, k,
&alpha,
dev_points_clusters, n,
dev_centers, k,
&beta,
dev_centers_of_points, n);
alpha = -1.0;
//Calculate distances and cost
sum_distances<<<gpu_grid, gpu_block>>>(dev_points, dev_centers_of_points, dev_points_help, n, k, dim);
cudaDeviceSynchronize();
//Get cost with cublas
stat = cublasDasum(handle, n, dev_points_help, 1, &cost);
return cost;
}
double kmeans_on_gpu(
double* dev_points,
double* dev_centers,
int n, int k, int dim,
double* dev_points_clusters,
double* dev_points_in_cluster,
double *dev_centers_of_points,
double* dev_new_centers,
int* dev_check,
int BLOCK_SIZE,
//CUBLAS Shit
cublasHandle_t handle,
cublasStatus_t stat,
double* dev_ones,
double* dev_points_help,
double* dev_temp_centers) {
double alpha = 1.0, beta = 0.0;
// Calculate grid and block sizes
int grid_size = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 gpu_grid(grid_size, 1);
dim3 gpu_block(BLOCK_SIZE, 1);
// printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y);
// printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y);
// printf("Shared memory size: %ld bytes\n", shmem_size);
// assign points to clusters - step 1
find_cluster_on_gpu<<<gpu_grid,gpu_block, k*dim*sizeof(double)>>>(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters);
cudaDeviceSynchronize();
// update means - step 2
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
k, dim, n,
&alpha,
dev_points_clusters, n,
dev_points, n,
&beta,
dev_new_centers, k);
// cudaDeviceSynchronize();
cublasDgemv(handle, CUBLAS_OP_T,
n, k,
&alpha,
dev_points_clusters, n,
dev_ones, 1,
&beta,
dev_points_in_cluster, 1);
// cudaDeviceSynchronize();
// Update centers based on counted points
update_center_on_gpu<<<gpu_grid,gpu_block>>>(
n, k, dim,
dev_new_centers,
dev_points_in_cluster);
cudaDeviceSynchronize();
//Check for convergence with CUBLAS
//dev_new_centers and dev_centers arrays are actually checked for equality
//No distances are calculated separately for each center point.
//It seems like its working smoothly so far
int icheck = 0; //This is used to make it compatible with how the code works now
double check = 0.0;
//First subtract the dev_center arrays
alpha = -1.0;
cublasDaxpy(handle, k*dim, &alpha, dev_new_centers, 1, dev_centers, 1);
// cudaDeviceSynchronize();
//Now find the norm2 of the new_centers
// cublasSetPointerMode(handle,CUBLAS_POINTER_MODE_HOST);
cublasDnrm2(handle, k*dim, dev_centers, 1, &check);
// cudaDeviceSynchronize();
if (!(check > EPS)) icheck = 1;
copy_to_gpu(&icheck, dev_check, sizeof(int));
//Update new centers
// TODO: Swap pointers
cudaMemcpy(dev_centers, dev_new_centers, sizeof(double)*k*dim, cudaMemcpyDeviceToDevice);
return 0.0;
}
void setup_RNG_states(curandState* devStates, dim3 gpu_grid, dim3 gpu_block){
unsigned long seed = rand();
//printf("Setting Seed to curandgen: %ld\n", seed);
init_RNG<<<gpu_grid, gpu_block>>>(devStates, seed);
//printf("SETUP RNG - CUDA Check: %s\n", gpu_get_last_errmsg());
cudaDeviceSynchronize();
}
void init_point_clusters(double *dev_points, double *dev_centers,
int n, int k, int dim,
dim3 gpu_grid, dim3 gpu_block,
double *result_clusters, int *result_clusters_old,
curandState *devStates){
//Init result_cluster arrays to 0
cudaMemset(result_clusters, 0x0, n*k*sizeof(double));
cudaMemset(result_clusters_old, 0x0, n*sizeof(int));
find_cluster_on_gpu3<<<gpu_grid, gpu_block, k*dim*sizeof(double)>>>(dev_points, dev_centers,
n, k, dim,
result_clusters, result_clusters_old);
//printf("Init Point Clusters - CUDA Check: %s\n", gpu_get_last_errmsg());
cudaDeviceSynchronize();
}
double kmeans_on_gpu_SA(
double* dev_points,
double* dev_centers,
int n, int k, int dim,
double* dev_points_clusters,
int* dev_points_clusters_old,
double* dev_points_in_cluster,
double* dev_centers_of_points,
double* dev_new_centers,
int* dev_check,
dim3 gpu_grid,
dim3 gpu_block,
//CUBLAS Shit
cublasHandle_t handle,
cublasStatus_t stat,
double* dev_ones,
double* dev_points_help,
double* dev_temp_centers,
curandState* devStates,
double temp) {
double alpha = 1.0, beta = 0.0;
//Upload Temperature to constant memory
//temp = 1.0/temp;
//cudaMemcpyToSymbol(sa_temp, &temp, sizeof(double), 0, cudaMemcpyHostToDevice);
//STEP 1 WITH SAKM
/*
SAKM_perturbation<<<gpu_grid, gpu_block, k*dim*sizeof(double)>>>(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters,
dev_points_clusters_old,
devStates);
//printf("SA Kernel Check: %s\n", gpu_get_last_errmsg());
cudaDeviceSynchronize();
*/
dim3 gpu_grid_c((k + 32 - 1)/32, 1);
dim3 gpu_block_c(32, 1);
SAGM_perturbation<<<gpu_grid_c, gpu_block_c>>>(dev_centers, k, dim, devStates);
//printf("SAGM Kernel Check: %s\n", gpu_get_last_errmsg());
cudaDeviceSynchronize();
// assign points to clusters - step 1
find_cluster_on_gpu<<<gpu_grid,gpu_block, k*dim*sizeof(double)>>>(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters);
cudaDeviceSynchronize();
// update means - step 2
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N,
k, dim, n,
&alpha,
dev_points_clusters, n,
dev_points, n,
&beta,
dev_new_centers, k);
cublasDgemv(handle, CUBLAS_OP_T,
n, k,
&alpha,
dev_points_clusters, n,
dev_ones, 1,
&beta,
dev_points_in_cluster, 1);
// Update centers based on counted points
update_center_on_gpu<<<gpu_grid,gpu_block>>>(
n, k, dim,
dev_new_centers,
dev_points_in_cluster);
cudaDeviceSynchronize();
// Evaluate current solution
double cost = evaluate_solution(dev_points, dev_new_centers, dev_points_clusters,
dev_centers_of_points, dev_points_help,
n, k, dim,
gpu_grid, gpu_block,
handle, stat);
//SAGM Paper notes that the cost function is the SSE (sum of squared error)
// In order to calculate the SSE we need the squared distances, which is what
// sum_distances already accumulates (its sqrt is commented out).
/*
//Check for convergence with CUBLAS
double check = 0.0;
//First subtract the dev_center arrays
alpha = -1.0;
cudaMemcpy(dev_temp_centers, dev_centers, sizeof(double)*k*dim, cudaMemcpyDeviceToDevice);
cublasDaxpy(handle, k*dim, &alpha, dev_new_centers, 1, dev_temp_centers, 1);
//Now find the norm2 of the new_centers
cublasDnrm2(handle, k*dim, dev_temp_centers, 1, &check);
*/
return cost;
}
|
d541c4204b07cdd790838119919fd5d31d9870f3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_per_chan_id.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| d541c4204b07cdd790838119919fd5d31d9870f3.cu | /**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_per_chan_id.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
57e0bf72550fecb8f282aade53c24319b80c0c59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
void my_cudasafe( hipError_t error, const char* message)
{
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s : %s\n",message,hipGetErrorString(error));
exit(-1);
}
}
__global__ void arradd(int* md, int* nd, int* pd, int size)
{
int myid = blockIdx.x*blockDim.x + threadIdx.x;
pd[myid] = md[myid] + nd[myid];
}
int main()
{
int size = 2000 * sizeof(int);
int m[2000], n[2000], p[2000],*md, *nd,*pd;
int i=0;
for(i=0; i<2000; i++ )
{
m[i] = i;
n[i] = i;
p[i] = 0;
}
my_cudasafe(hipMalloc(&md, size),"Cuda malloc : md");
my_cudasafe(hipMemcpy(md, m, size, hipMemcpyHostToDevice),"Cuda memcopy H2D: md");
my_cudasafe(hipMalloc(&nd, size),"Cuda malloc : nd");
my_cudasafe(hipMemcpy(nd, n, size, hipMemcpyHostToDevice),"Cuda memcopy H2D: nd");
my_cudasafe(hipMalloc(&pd, size),"Cuda malloc : pd");
// 2 blocks x 1000 threads give exactly one thread per element; a 2000-thread block would exceed the 1024-thread limit
dim3 DimGrid(2, 1);
dim3 DimBlock(1000, 1);
hipLaunchKernelGGL(( arradd), dim3(DimGrid),dim3(DimBlock) , 0, 0, md,nd,pd,size);
my_cudasafe(hipGetLastError(), "arradd kernel");
my_cudasafe(hipMemcpy(p, pd, size, hipMemcpyDeviceToHost),"Cuda memcopy D2H: pd");
my_cudasafe(hipFree(md),"hipFree md");
my_cudasafe(hipFree(nd),"hipFree nd");
my_cudasafe(hipFree(pd),"hipFree pd");
for(i=0; i<2000; i++ )
{
printf("\t%d",p[i]);
}
}
| 57e0bf72550fecb8f282aade53c24319b80c0c59.cu | #include<stdio.h>
#include<stdlib.h>
void my_cudasafe( cudaError_t error, const char* message)
{
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s : %s\n",message,cudaGetErrorString(error));
exit(-1);
}
}
__global__ void arradd(int* md, int* nd, int* pd, int size)
{
int myid = blockIdx.x*blockDim.x + threadIdx.x;
pd[myid] = md[myid] + nd[myid];
}
int main()
{
int size = 2000 * sizeof(int);
int m[2000], n[2000], p[2000],*md, *nd,*pd;
int i=0;
for(i=0; i<2000; i++ )
{
m[i] = i;
n[i] = i;
p[i] = 0;
}
my_cudasafe(cudaMalloc(&md, size),"Cuda malloc : md");
my_cudasafe(cudaMemcpy(md, m, size, cudaMemcpyHostToDevice),"Cuda memcopy H2D: md");
my_cudasafe(cudaMalloc(&nd, size),"Cuda malloc : nd");
my_cudasafe(cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice),"Cuda memcopy H2D: nd");
my_cudasafe(cudaMalloc(&pd, size),"Cuda malloc : pd");
// 2 blocks x 1000 threads give exactly one thread per element; a 2000-thread block would exceed the 1024-thread limit
dim3 DimGrid(2, 1);
dim3 DimBlock(1000, 1);
arradd<<< DimGrid,DimBlock >>>(md,nd,pd,size);
my_cudasafe(cudaGetLastError(), "arradd kernel");
my_cudasafe(cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost),"Cuda memcopy D2H: pd");
my_cudasafe(cudaFree(md),"cudaFree md");
my_cudasafe(cudaFree(nd),"cudaFree nd");
my_cudasafe(cudaFree(pd),"cudaFree pd");
for(i=0; i<2000; i++ )
{
printf("\t%d",p[i]);
}
}
|
eca1c4a6c07cb119637ab6a93496b9c09b17d3d7.hip | // !!! This is a file automatically generated by hipify!!!
/*
Reference:
Parallel Scan for Stream Architectures1
Duane Merrill Andrew Grimshaw
inclusive scan
*/
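/*
Pipeline sketch (illustrative): the scan is done in three passes - reduce() writes one
partial sum per block into d_scan, middle_scan() scans those block sums within a single
block (carrying a running seed across batches), and lower_scan() re-scans each block
seeded with its block offset and writes the final result. Inclusive scan on a tiny array:
input = [3, 1, 7, 0, 4, 1, 6, 3] -> output = [3, 4, 11, 11, 15, 16, 22, 25]
*/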
#include "scan_header.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <helper_cuda.h>
#include <chrono>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
__global__ void reduce(float4 *d_input, float *d_output){
__shared__ float s_data[BLOCKDIM * 2];//1 cell per thread + another blockdim for easier indx management
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_input += idx;
d_output += blockIdx.x;
float4 item = *d_input;
float sum = item.w + item.x + item.y + item.z;
s_data[threadIdx.x] = sum;
__syncthreads();
// we reduce and put the result on the second half of shared memory
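// Worked example (assuming BLOCKDIM == 8, LOG2_BLOCKDIM == 3): the d=3 pass writes the 4
// pairwise sums into s_data[8..11], d=2 writes 2 sums into s_data[12..13], and d=1 writes
// the block total into s_data[14]; after the loop `a` points at that last cell, so a[0]
// below is the sum of all per-thread values.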
float *a = s_data;
#pragma unroll
for(int d = LOG2_BLOCKDIM; d > 0; d--){
if( threadIdx.x < (1 << (d - 1)) ){
a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1];
}
a = &a[(1 << d)];
__syncthreads();
}
// output the sum
if(threadIdx.x == 0){
d_output[0] = a[0];
}
}
// 1 block
__global__ void middle_scan(float *d_input, int iter_per_thread){
__shared__ float s_data[BLOCKDIM * 2];
float seed = 0;
d_input += threadIdx.x;
// cyclically scan, with the result of each scan becoming the seed to the next
#pragma unroll
for(int batch = 0; batch < iter_per_thread; batch++){
s_data[threadIdx.x] = d_input[batch * BLOCKDIM]; // each batch advances by one full block width of block sums
__syncthreads();
//upsweep
float *a = s_data;
#pragma unroll
for(int d = LOG2_BLOCKDIM; d > 1; d--){ // we don't need last sum, inclusive scan so, the seed = first element
if( threadIdx.x < (1 << (d - 1)) ){
a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1];
}
a += (1 << d);
__syncthreads();
}
if(threadIdx.x == 0){
a[1] = a[0];
a[0] = seed;
}
__syncthreads();
// downsweep
#pragma unroll
for(int d = 2; d <= LOG2_BLOCKDIM; d++){
a -= (1 << d);
if( threadIdx.x < (1 << (d - 1)) ){
a[2 * threadIdx.x + 1] = a[2 * threadIdx.x] + a[(1 << d) + threadIdx.x];
a[2 * threadIdx.x] = a[(1 << d) + threadIdx.x];
}
__syncthreads();
}
d_input[batch * BLOCKDIM] = s_data[threadIdx.x];
if(threadIdx.x == 0){
seed = s_data[BLOCKDIM - 1];
}
}
}
__global__ void lower_scan(float4 *d_input, float *d_scan, float4 *d_output){
__shared__ float s_data[BLOCKDIM * 2]; //1 cell per thread + another blockdim for easier indx management
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_input += idx;
float4 item = *d_input;
float sum = item.w + item.x + item.y + item.z;
s_data[threadIdx.x] = sum;
__syncthreads();
// we reduce and put the result on the second half of shared memory
float *a = s_data;
#pragma unroll
for(int d = LOG2_BLOCKDIM; d > 1; d--){
if( threadIdx.x < (1 << (d - 1)) ){
a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1];
}
a = &a[(1 << d)];
__syncthreads();
}
if(threadIdx.x == 0){
a[1] = a[0];
a[0] = d_scan[blockIdx.x];
}
__syncthreads();
// downsweep
#pragma unroll
for(int d = 2; d <= LOG2_BLOCKDIM; d++){
a -= (1 << d);
if( threadIdx.x < (1 << (d - 1)) ){
a[2 * threadIdx.x + 1] = a[2 * threadIdx.x] + a[(1 << d) + threadIdx.x];
a[2 * threadIdx.x] = a[(1 << d) + threadIdx.x];
}
__syncthreads();
}
item.x += s_data[threadIdx.x];
item.y += item.x;
item.z += item.y;
item.w += item.z;
d_output[idx] = item;
}
void scan(float4* d_input, float4* d_output, int arr_size){
int temp = ((arr_size >> 2) + BLOCKDIM - 1)/BLOCKDIM; // each thread processes 1 float4
dim3 dimBlock(BLOCKDIM);
dim3 dimGrid(temp);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float total_time = 0;
float elapsed_time;
float *d_scan;
hipMalloc((void **)&d_scan, temp * sizeof(float));
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_scan);
//hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf( "reduce: %.8f ms\n", elapsed_time);
total_time += elapsed_time;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( middle_scan), dim3(1), dim3(dimBlock), 0, 0, d_scan, temp/BLOCKDIM);
//hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf( "middle scan: %.8f ms\n", elapsed_time);
total_time += elapsed_time;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( lower_scan), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_scan, d_output);
//hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf( "final scan: %.8f ms\n", elapsed_time);
total_time += elapsed_time;
printf("total time GPU %.8fms\n", total_time);
float *temp_thrust;
float *temp_input_thrust = (float*) d_input;
hipMalloc((void **)&temp_thrust, arr_size * sizeof(float));
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(temp_input_thrust);
thrust::device_ptr<float> dev_ptr_out = thrust::device_pointer_cast(temp_thrust);
hipEventRecord(start, 0);
thrust::inclusive_scan(dev_ptr, dev_ptr + arr_size, dev_ptr_out);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf( "thrust scan: %.8f ms\n", elapsed_time);
float *h_thrust = (float*) malloc(arr_size * sizeof(float));
hipMemcpy(h_thrust, temp_thrust, arr_size * sizeof(float), hipMemcpyDeviceToHost);
std::cout<<"---------THRUST---------\n";
for(int i = 0; i < 20; i++){
std::cout<<h_thrust[i]<< " ";
if(((i%4) == 0) && i){
std::cout<<"\n";
}
}
hipFree(temp_thrust);
hipFree(d_scan);
hipEventDestroy(start);
hipEventDestroy(stop);
}
void fill_array(float4 *h_input, int arr_size){
float *temp = (float*) h_input;
for(int i = 0; i < arr_size; i++){
temp[i] = (float) rand() / RAND_MAX;
}
}
int check_solution(float4 *h_input, float4 *h_output, int arr_size){
float *temp, *h_input1, *h_output1;
h_input1 = (float*)h_input;
h_output1 = (float*)h_output;
temp = (float*) malloc(arr_size * sizeof(float));
auto tic = std::chrono::high_resolution_clock::now();
temp[0] = h_input1[0];
for(int i = 1; i < arr_size; i++){
temp[i] = temp[i - 1] + h_input1[i];
}
auto toc = std::chrono::high_resolution_clock::now();
printf("total time CPU %.8fms\n", (std::chrono::duration_cast <std::chrono::milliseconds> (toc - tic)).count() * 1.0);
std::cout<<"---------CPU---------\n";
for(int i = 0; i < 20; i++){
std::cout<<temp[i]<< " ";
if(((i%4) == 0) && i){
std::cout<<"\n";
}
}
int correct = 1;
/*for(int i = 0; i < arr_size; i++){
if(((temp[i] - h_output1[i]) * (temp[i] - h_output1[i])) > 0.00000001){
correct = 0;
break;
}
}*/
return correct;
}
int main(void){
srand(0);
float4 *h_input, *h_output;
float4 *d_input, *d_output;
int arr_size = 1 << 25;
h_input = (float4*) malloc(arr_size * sizeof(float));
h_output = (float4*) malloc(arr_size * sizeof(float));
fill_array(h_input, arr_size);
for(int i = 0; i < 5; i++){
std::cout<<h_input[i].x<<" "<<h_input[i].y<<" "<<h_input[i].z<<" "<<h_input[i].w<<"\n";
}
hipMalloc((void **)&d_input, arr_size * sizeof(float));
hipMalloc((void **)&d_output, arr_size * sizeof(float));
hipMemcpy(d_input, h_input, arr_size * sizeof(float), hipMemcpyHostToDevice);
scan(d_input, d_output, arr_size);
hipDeviceSynchronize();
hipMemcpy(h_output, d_output, arr_size * sizeof(float), hipMemcpyDeviceToHost);
std::cout<<"--------GPU----------\n";
for(int i = 0; i < 5; i++){
std::cout<<h_output[i].x<<" "<<h_output[i].y<<" "<<h_output[i].z<<" "<<h_output[i].w<<"\n";
}
check_solution(h_input, h_output, arr_size) ? std::cout<<"\ngood" : std::cout<<"\nbad";
hipFree(d_input);
hipFree(d_output);
free(h_input);
free(h_output);
return 0;
} | eca1c4a6c07cb119637ab6a93496b9c09b17d3d7.cu | /*
Reference:
Parallel Scan for Stream Architectures
Duane Merrill Andrew Grimshaw
inclusive scan
*/
#include "scan_header.h"
#include <cuda_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <helper_cuda.h>
#include <chrono>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
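// Phase 1 of the scan: each block sums its BLOCKDIM float4 elements with a
// shared-memory tree reduction and writes one partial sum per block to d_output.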
__global__ void reduce(float4 *d_input, float *d_output){
__shared__ float s_data[BLOCKDIM * 2]; // 1 cell per thread + another BLOCKDIM for easier index management
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_input += idx;
d_output += blockIdx.x;
float4 item = *d_input;
float sum = item.w + item.x + item.y + item.z;
s_data[threadIdx.x] = sum;
__syncthreads();
// we reduce and put the result on the second half of shared memory
float *a = s_data;
#pragma unroll
for(int d = LOG2_BLOCKDIM; d > 0; d--){
if( threadIdx.x < (1 << (d - 1)) ){
a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1];
}
a = &a[(1 << d)];
__syncthreads();
}
// output the sum
if(threadIdx.x == 0){
d_output[0] = a[0];
}
}
// 1 block
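// Phase 2 of the scan: a single block walks the per-block partial sums in
// BLOCKDIM-wide batches, turning them in place into exclusive prefix sums and
// carrying the running total from one batch to the next in `seed`.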
__global__ void middle_scan(float *d_input, int iter_per_thread){
__shared__ float s_data[BLOCKDIM * 2];
float seed = 0;
d_input += threadIdx.x;
// cyclically scan, with the result of each scan becoming the seed to the next
#pragma unroll
for(int batch = 0; batch < iter_per_thread; batch++){
s_data[threadIdx.x] = d_input[batch * BLOCKDIM]; // each batch covers the next BLOCKDIM partial sums
__syncthreads();
//upsweep
float *a = s_data;
#pragma unroll
for(int d = LOG2_BLOCKDIM; d > 1; d--){ // stop one level early: the scan root only needs the two half-sums, and the seed becomes the first output element
if( threadIdx.x < (1 << (d - 1)) ){
a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1];
}
a += (1 << d);
__syncthreads();
}
if(threadIdx.x == 0){
// root of the tree: seeded exclusive prefixes of the two block halves
const float batch_total = a[0] + a[1];
a[1] = a[0] + seed;
a[0] = seed;
seed += batch_total; // carry this batch's total into the next batch
}
__syncthreads();
// downsweep
#pragma unroll
for(int d = 2; d <= LOG2_BLOCKDIM; d++){
a -= (1 << d);
if( threadIdx.x < (1 << (d - 1)) ){
a[2 * threadIdx.x + 1] = a[2 * threadIdx.x] + a[(1 << d) + threadIdx.x];
a[2 * threadIdx.x] = a[(1 << d) + threadIdx.x];
}
__syncthreads();
}
d_input[batch * BLOCKDIM] = s_data[threadIdx.x];
}
}
__global__ void lower_scan(float4 *d_input, float *d_scan, float4 *d_output){
__shared__ float s_data[BLOCKDIM * 2]; // 1 cell per thread + another BLOCKDIM for easier index management
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_input += idx;
float4 item = *d_input;
float sum = item.w + item.x + item.y + item.z;
s_data[threadIdx.x] = sum;
__syncthreads();
// we reduce and put the result on the second half of shared memory
float *a = s_data;
#pragma unroll
for(int d = LOG2_BLOCKDIM; d > 1; d--){
if( threadIdx.x < (1 << (d - 1)) ){
a[(1 << d) + threadIdx.x] = a[2 * threadIdx.x] + a[2 * threadIdx.x + 1];
}
a = &a[(1 << d)];
__syncthreads();
}
if(threadIdx.x == 0){
// seed this block's scan with its scanned per-block offset
const float block_offset = d_scan[blockIdx.x];
a[1] = a[0] + block_offset;
a[0] = block_offset;
}
__syncthreads();
// downsweep
#pragma unroll
for(int d = 2; d <= LOG2_BLOCKDIM; d++){
a -= (1 << d);
if( threadIdx.x < (1 << (d - 1)) ){
a[2 * threadIdx.x + 1] = a[2 * threadIdx.x] + a[(1 << d) + threadIdx.x];
a[2 * threadIdx.x] = a[(1 << d) + threadIdx.x];
}
__syncthreads();
}
item.x += s_data[threadIdx.x];
item.y += item.x;
item.z += item.y;
item.w += item.z;
d_output[idx] = item;
}
void scan(float4* d_input, float4* d_output, int arr_size){
int temp = ((arr_size >> 2) + BLOCKDIM - 1)/BLOCKDIM; // each thread processes 1 float4
dim3 dimBlock(BLOCKDIM);
dim3 dimGrid(temp);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float total_time = 0;
float elapsed_time;
float *d_scan;
cudaMalloc((void **)&d_scan, temp * sizeof(float));
cudaEventRecord(start, 0);
reduce<<<dimGrid, dimBlock>>>(d_input, d_scan);
//cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "reduce: %.8f ms\n", elapsed_time);
total_time += elapsed_time;
cudaEventRecord(start, 0);
middle_scan<<<1, dimBlock>>>(d_scan, temp/BLOCKDIM);
//cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "middle scan: %.8f ms\n", elapsed_time);
total_time += elapsed_time;
cudaEventRecord(start, 0);
lower_scan<<<dimGrid, dimBlock>>>(d_input, d_scan, d_output);
//cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "final scan: %.8f ms\n", elapsed_time);
total_time += elapsed_time;
printf("total time GPU %.8fms\n", total_time);
float *temp_thrust;
float *temp_input_thrust = (float*) d_input;
cudaMalloc((void **)&temp_thrust, arr_size * sizeof(float));
thrust::device_ptr<float> dev_ptr = thrust::device_pointer_cast(temp_input_thrust);
thrust::device_ptr<float> dev_ptr_out = thrust::device_pointer_cast(temp_thrust);
cudaEventRecord(start, 0);
thrust::inclusive_scan(dev_ptr, dev_ptr + arr_size, dev_ptr_out);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "thrust scan: %.8f ms\n", elapsed_time);
float *h_thrust = (float*) malloc(arr_size * sizeof(float));
cudaMemcpy(h_thrust, temp_thrust, arr_size * sizeof(float), cudaMemcpyDeviceToHost);
std::cout<<"---------THRUST---------\n";
for(int i = 0; i < 20; i++){
std::cout<<h_thrust[i]<< " ";
if(((i%4) == 0) && i){
std::cout<<"\n";
}
}
cudaFree(temp_thrust);
cudaFree(d_scan);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void fill_array(float4 *h_input, int arr_size){
float *temp = (float*) h_input;
for(int i = 0; i < arr_size; i++){
temp[i] = (float) rand() / RAND_MAX;
}
}
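// CPU reference: serial inclusive scan over the same data, used for timing; note
// that the element-wise comparison against the GPU output is currently commented
// out, so the function always reports success.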
int check_solution(float4 *h_input, float4 *h_output, int arr_size){
float *temp, *h_input1, *h_output1;
h_input1 = (float*)h_input;
h_output1 = (float*)h_output;
temp = (float*) malloc(arr_size * sizeof(float));
auto tic = std::chrono::high_resolution_clock::now();
temp[0] = h_input1[0];
for(int i = 1; i < arr_size; i++){
temp[i] = temp[i - 1] + h_input1[i];
}
auto toc = std::chrono::high_resolution_clock::now();
printf("total time CPU %.8fms\n", (std::chrono::duration_cast <std::chrono::milliseconds> (toc - tic)).count() * 1.0);
std::cout<<"---------CPU---------\n";
for(int i = 0; i < 20; i++){
std::cout<<temp[i]<< " ";
if(((i%4) == 0) && i){
std::cout<<"\n";
}
}
int correct = 1;
/*for(int i = 0; i < arr_size; i++){
if(((temp[i] - h_output1[i]) * (temp[i] - h_output1[i])) > 0.00000001){
correct = 0;
break;
}
}*/
return correct;
}
int main(void){
srand(0);
float4 *h_input, *h_output;
float4 *d_input, *d_output;
int arr_size = 1 << 25;
h_input = (float4*) malloc(arr_size * sizeof(float));
h_output = (float4*) malloc(arr_size * sizeof(float));
fill_array(h_input, arr_size);
for(int i = 0; i < 5; i++){
std::cout<<h_input[i].x<<" "<<h_input[i].y<<" "<<h_input[i].z<<" "<<h_input[i].w<<"\n";
}
cudaMalloc((void **)&d_input, arr_size * sizeof(float));
cudaMalloc((void **)&d_output, arr_size * sizeof(float));
cudaMemcpy(d_input, h_input, arr_size * sizeof(float), cudaMemcpyHostToDevice);
scan(d_input, d_output, arr_size);
cudaDeviceSynchronize();
cudaMemcpy(h_output, d_output, arr_size * sizeof(float), cudaMemcpyDeviceToHost);
std::cout<<"--------GPU----------\n";
for(int i = 0; i < 5; i++){
std::cout<<h_output[i].x<<" "<<h_output[i].y<<" "<<h_output[i].z<<" "<<h_output[i].w<<"\n";
}
check_solution(h_input, h_output, arr_size) ? std::cout<<"\ngood" : std::cout<<"\nbad";
cudaFree(d_input);
cudaFree(d_output);
free(h_input);
free(h_output);
return 0;
} |
98abef652fe6a055bfd67032cc7dc70bfe658410.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increasing the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is trickier than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
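// Reduces the blockDim.x*blockDim.y values contributed by the threads of a block
// down to `lanes` partial results, left in x[0..lanes); shared memory handles the
// inter-warp stage and warp shuffles handle the final stage.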
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.scalar_type() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : g.scalar_type();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(v.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
fast_dim_size,
slower_dims_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
TORCH_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
TORCH_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_g = at::empty_like(saved_g, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>)
, dim3(grad_w.size(0)),
dim3(BLOCK),
BLOCK*sizeof(accscalar_t),
stream,
grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>)
, dim3((fast_dim_size+TILE_W-1)/TILE_W),
dim3(dim3(TILE_W,TILE_H)),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream,
grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
fast_dim_size,
slower_dims_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
| 98abef652fe6a055bfd67032cc7dc70bfe658410.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <c10/util/Exception.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
namespace at {
namespace native {
namespace {
// Block size for weight_norm_*_first_dim_kernel.
// Currently, kernels are non-persistent.
// Dialing up the block size to, say 1024, can improve performance by
// increasing the amount of cache available per block, which can improve cache hit rate.
// However, this is less efficient for short rows. 256 is pretty versatile.
// May be worth implementing heuristics later.
#define BLOCK 256
// Block size for weight_norm_*_last_dim_kernel.
// This is trickier than the first_dim case because we must make blocks
// at least 16 fast elements wide to ensure fully-coalesced half-precision accesses.
// Since output-element parallelism is along the fast dimension, this reduces the number of
// blocks we can launch by 16X.
#define TILE_W 16
// Somewhat versatile strategy: max out intra-block parallelism by extending
// blocks across the slow dimension up to the hardware-max block size of 1024.
#define TILE_H 64
template<typename T, typename ReduceOp>
__device__ __forceinline__ void reduce_block_into_lanes
(T *x,
T val,
int lanes, // lanes is intended to be <= 32.
ReduceOp reduceOp)
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = reduceOp(x[tid], x[tid+i]);
__syncthreads();
}
if(tid < 32)
{
T final;
if(blockSize >= 64)
final = reduceOp(x[tid], x[tid+32]);
else
final = val;
// __SYNCWARP();
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for(int i = 16; i >= lanes; i >>= 1)
final = reduceOp(final, WARP_SHFL_DOWN(final, i));
if(tid < lanes)
x[tid] = final; // EpilogueOp
}
// Make sure the smem result is visible to all warps.
__syncthreads();
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_first_dim_kernel
(scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int rowSize)
{
// We are norming each slowest-dim row of the tensor separately.
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
result = sqrtf(result);
if(tid == 0)
norms[row] = result;
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(g[row]);
accscalar_t rnorm = 1.f/result; // for consistency with backward kernel
// Write data to output
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[i+rowStart]);
w[i+rowStart] = scalar_cast<scalar_t>(g_this_row*val_f*rnorm);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_fwd_last_dim_kernel
(
scalar_t* __restrict__ w,
accscalar_t* __restrict__ norms,
const scalar_t* __restrict__ v,
const scalar_t* __restrict__ g,
const int fast_dim_size,
const int slower_dims_size
)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* alloc = (accscalar_t*)buf;
accscalar_t* s = &alloc[0];
accscalar_t* rnorms_this_block = &alloc[blockDim.x*blockDim.y];
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
thread_sum += val_f*val_f; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
// Better to pass an EpilogueOp to reduce_block_into_lanes?
if(threadIdx.y == 0)
{
accscalar_t result = s[threadIdx.x];
accscalar_t norm_this_col = sqrtf(result);
norms[fast_dim_location] = norm_this_col;
rnorms_this_block[threadIdx.x] = 1.f/norm_this_col;
}
__syncthreads();
accscalar_t g_this_col = scalar_cast<accscalar_t>(g[fast_dim_location]);
accscalar_t rnorm = rnorms_this_block[threadIdx.x];
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t val_f = scalar_cast<accscalar_t>(v[currentIdx]);
w[currentIdx] = scalar_cast<scalar_t>(g_this_col*val_f*rnorm);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_first_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int rowSize)
{
// For now, assign one block to each row.
const int tid = threadIdx.x;
const int row = blockIdx.x;
const int stride = blockDim.x;
// Logical index offset for this flattened row
const int rowStart = row*rowSize;
// Hack to get around nvcc complaining when an smem array is declared with the same name
// but different types in different kernels (in this case different instantiations)
// extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s"
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
for(int i = tid; i < rowSize; i += stride )
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[i+rowStart]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[i+rowStart]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
}
reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>());
accscalar_t result = s[0];
// Could choose to save reciprocal of norm instead I suppose, but norms is probably
// more handy to keep around.
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[row];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(tid == 0)
grad_g[row] = scalar_cast<scalar_t>(result*rnorm);
// Broadcast load, could use shared memory instead.
accscalar_t g_this_row = scalar_cast<accscalar_t>(saved_g[row]);
// Write v gradients. We are reusing values that were loaded earlier, so there
// is an optimization opportunity here (store values persistently).
for(int j = tid; j < rowSize; j += stride )
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[j+rowStart]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[j+rowStart]);
accscalar_t grad_vj = g_this_row*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[j+rowStart] = scalar_cast<scalar_t>(grad_vj);
}
}
template
<typename scalar_t,
typename accscalar_t>
__global__ void weight_norm_bwd_last_dim_kernel
(scalar_t* __restrict__ grad_v,
scalar_t* __restrict__ grad_g,
const scalar_t* __restrict__ grad_w,
const scalar_t* __restrict__ saved_v,
const scalar_t* __restrict__ saved_g,
const accscalar_t* __restrict__ saved_norms,
const int fast_dim_size,
const int slower_dims_size)
{
const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x;
extern __shared__ char buf[];
accscalar_t* s = (accscalar_t*)buf;
accscalar_t thread_sum = 0.f;
int slower_dims_location = threadIdx.y;
int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wi = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vi = scalar_cast<accscalar_t>(saved_v[currentIdx]);
thread_sum += grad_wi*saved_vi; // AccumOp, could do Kahan here
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>());
accscalar_t result = s[threadIdx.x];
// Broadcast load; could use shared memory instead.
accscalar_t rnorm = 1.f/saved_norms[fast_dim_location];
accscalar_t rnorm3 = rnorm*rnorm*rnorm;
// Write g gradients.
if(threadIdx.y == 0)
grad_g[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm);
// Entire block pulls these values, could use shared memory instead.
accscalar_t g_this_col = scalar_cast<accscalar_t>(saved_g[fast_dim_location]);
// Write v gradients.
slower_dims_location = threadIdx.y;
currentIdx = fast_dim_location + fast_dim_size*slower_dims_location;
if(fast_dim_location < fast_dim_size)
while(slower_dims_location < slower_dims_size)
{
accscalar_t grad_wj = scalar_cast<accscalar_t>(grad_w[currentIdx]);
accscalar_t saved_vj = scalar_cast<accscalar_t>(saved_v[currentIdx]);
accscalar_t grad_vj = g_this_col*(rnorm*grad_wj - rnorm3*saved_vj*result);
grad_v[currentIdx] = scalar_cast<scalar_t>(grad_vj);
currentIdx += blockDim.y*fast_dim_size;
slower_dims_location += blockDim.y;
}
}
} // anonymous namespace
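// Host entry point for the forward pass: dispatches to the first-dim kernel
// (one block per slowest-dim row) when dim == 0, or to the tiled last-dim kernel
// when dim is the last dimension; the per-row norms are returned alongside w so
// the backward pass can reuse them.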
std::tuple<Tensor,Tensor> weight_norm_cuda
(const Tensor & v,
const Tensor & g,
int64_t dim)
{
auto w = at::empty_like(v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// weight_norm_fused does have a derivative defined in derivatives.yaml, therefore, VariableType.cpp
// sends the unpacked g.data() as the argument. In other words, we expect "g" is a bare Tensor here.
// norms is only needed to stash for backward.
// g.scalar_type() may be at::ScalarType::Double, Float, or Half.
// If Half, stash norms as float.
at::ScalarType AccType = g.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : g.scalar_type();
// Will this create norms on the same device as g, regardless of what the thread's default
// current device is? I believe so, because Type::* functions are DeviceGuard()ed.
auto norms = at::empty_strided(g.sizes(), g.strides(), g.options().dtype(AccType));
const int ndims = v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_first_dim_kernel<scalar_t, accscalar_t>
<<<v.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= v.size(i);
int fast_dim_size = v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(v.scalar_type(),
"weight_norm_fwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_fwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(w.data_ptr<scalar_t>(),
norms.data_ptr<accscalar_t>(),
v.data_ptr<scalar_t>(),
g.data_ptr<scalar_t>(),
fast_dim_size,
slower_dims_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{w, norms};
}
std::tuple<Tensor, Tensor> weight_norm_cuda_backward
(const Tensor & grad_w,
const Tensor & saved_v,
const Tensor & saved_g,
const Tensor & saved_norms,
int64_t dim)
{
// These checks should always succeed, because weight_norm_fused_backward should only
// ever be recorded in the autograd graph via weight_norm, which passes contiguous v and g.
TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous");
TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous");
TORCH_CHECK(saved_norms.is_contiguous(), "saved_norms must be contiguous");
TORCH_CHECK(dim == 0 || dim == saved_v.dim() - 1, "fused kernels can only be applied for first or last dim")
auto grad_v = at::empty_like(saved_v, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_g = at::empty_like(saved_g, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
const int ndims = saved_v.dim();
if(dim == 0)
{
// Find logical size of each flattened slowest-dim row
int rowSize = 1;
for(int i = ndims - 1; i > 0; i--)
rowSize *= saved_v.size(i);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_first_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_first_dim_kernel<scalar_t, accscalar_t>
<<<grad_w.size(0),
BLOCK,
BLOCK*sizeof(accscalar_t),
stream>>>
(grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
rowSize);
});
}
else if(dim == ndims - 1)
{
// Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array.
int slower_dims_size = 1;
for(int i = 0; i < ndims - 1; i++)
slower_dims_size *= saved_v.size(i);
int fast_dim_size = saved_v.size(ndims-1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(saved_v.scalar_type(),
"weight_norm_bwd_last_dim_kernel",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
weight_norm_bwd_last_dim_kernel<scalar_t, accscalar_t>
<<<(fast_dim_size+TILE_W-1)/TILE_W,
dim3(TILE_W,TILE_H),
(TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t),
stream>>>
(grad_v.data_ptr<scalar_t>(),
grad_g.data_ptr<scalar_t>(),
grad_w.data_ptr<scalar_t>(),
saved_v.data_ptr<scalar_t>(),
saved_g.data_ptr<scalar_t>(),
saved_norms.data_ptr<accscalar_t>(),
fast_dim_size,
slower_dims_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// The kernel execution is asynchronous, so this will only catch errors on the kernel launch,
// not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught
// until a later error check on a synchronizing CUDA call. Unfortunately, without manually
// synchronizing here, the foregoing is the best we can do.
return std::tuple<Tensor, Tensor>{grad_v, grad_g};
}
#undef BLOCK
#undef TILE_W
#undef TILE_H
} // namespace native
} // namespace at
|
4551bcbbcf973f9788cd0e9c97c724138250c42e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_raycast.h"
#include "MatUtils.h"
#include "launch_utils.h"
namespace roo
{
//////////////////////////////////////////////////////
// Phong shading.
//////////////////////////////////////////////////////
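// Simple Phong shading with fixed ambient/diffuse/specular weights, a hard-coded
// light direction in camera coordinates, and a power-of-ten specular term computed
// by repeated multiplication.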
__host__ __device__ inline
float PhongShade(const float3 p_c, const float3 n_c)
{
const float ambient = 0.4;
const float diffuse = 0.4;
const float specular = 0.2;
const float3 eyedir = -1.0f * p_c / length(p_c);
const float3 _lightdir = make_float3(0.4,0.4,-1);
const float3 lightdir = _lightdir / length(_lightdir);
const float ldotn = dot(lightdir,n_c);
const float3 lightreflect = 2*ldotn*n_c + (-1.0) * lightdir;
const float edotr = fmaxf(0,dot(eyedir,lightreflect));
const float spec = edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr;
return ambient + diffuse * ldotn + specular * spec;
}
//////////////////////////////////////////////////////
// Raycast SDF
//////////////////////////////////////////////////////
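// Sphere-traces each pixel's ray through the SDF volume: the ray is first clipped
// against the volume's bounding box, then marched in steps of max(sdf, voxel size)
// until the signed distance changes sign; the hit is optionally refined by linear
// interpolation (subpix) before the depth, a Phong-shaded intensity and the surface
// normal are written out.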
__global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < img.w && v < img.h ) {
const float3 c_w = SE3Translation(T_wc);
const float3 ray_c = K.Unproject(u,v);
const float3 ray_w = mulSO3(T_wc, ray_c);
// Raycast bounding box to find valid ray segment of sdf
// http://www.cs.utah.edu/~awilliam/box/box.pdf
const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w;
const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w;
const float3 tmin = fminf(tminbound,tmaxbound);
const float3 tmax = fmaxf(tminbound,tmaxbound);
const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near);
const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far);
float depth = 0.0f;
// If ray intersects bounding box
if(max_tmin < min_tmax ) {
// Go between max_tmin and min_tmax
float lambda = max_tmin;
float last_sdf = 0.0f/0.0f;
float min_delta_lambda = vol.VoxelSizeUnits().x;
float delta_lambda = 0;
// March through space
while(lambda < min_tmax) {
const float3 pos_w = c_w + lambda * ray_w;
const float sdf = vol.GetUnitsTrilinearClamped(pos_w);
if( sdf <= 0 ) {
if( last_sdf > 0) {
// surface!
if(subpix) {
lambda = lambda + delta_lambda * sdf / (last_sdf - sdf);
}
depth = lambda;
}
break;
}
delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist;
lambda += delta_lambda;
last_sdf = sdf;
}
}
// Compute normal
const float3 pos_w = c_w + depth * ray_w;
const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w);
const float len_n_w = length(_n_w);
const float3 n_w = len_n_w > 0 ? _n_w / len_n_w : make_float3(0,0,1);
const float3 n_c = mulSO3inv(T_wc,n_w);
const float3 p_c = depth * ray_c;
if(depth > 0 ) {
// img(u,v) = (depth - near) / (far - near);
imgdepth(u,v) = depth;
img(u,v) = PhongShade(p_c, n_c);
// norm(u,v) = make_float4(0.5,0.5,0.5,1) + make_float4(n_c, 0) /2.0f;
norm(u,v) = make_float4(n_c, 1);
}else{
imgdepth(u,v) = 0.0f/0.0f;
img(u,v) = 0;
norm(u,v) = make_float4(0,0,0,0);
}
}
}
void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
dim3 blockDim, gridDim;
#if __CUDA_ARCH__ < 300
InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16);
#else
InitDimFromOutputImageOver(blockDim, gridDim, img);
#endif
hipLaunchKernelGGL(( KernRaycastSdf), dim3(gridDim),dim3(blockDim), 0, 0, depth, norm, img, vol, T_wc, K, near, far, trunc_dist, subpix);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast Color SDF
//////////////////////////////////////////////////////
__global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < img.w && v < img.h ) {
const float3 c_w = SE3Translation(T_wc);
const float3 ray_c = K.Unproject(u,v);
const float3 ray_w = mulSO3(T_wc, ray_c);
// Raycast bounding box to find valid ray segment of sdf
// http://www.cs.utah.edu/~awilliam/box/box.pdf
const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w;
const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w;
const float3 tmin = fminf(tminbound,tmaxbound);
const float3 tmax = fmaxf(tminbound,tmaxbound);
const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near);
const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far);
float depth = 0.0f;
// If ray intersects bounding box
if(max_tmin < min_tmax ) {
// Go between max_tmin and min_tmax
float lambda = max_tmin;
float last_sdf = 0.0f/0.0f;
float min_delta_lambda = vol.VoxelSizeUnits().x;
float delta_lambda = 0;
// March through space
while(lambda < min_tmax) {
const float3 pos_w = c_w + lambda * ray_w;
const float sdf = vol.GetUnitsTrilinearClamped(pos_w);
if( sdf <= 0 ) {
if( last_sdf > 0) {
// surface!
if(subpix) {
lambda = lambda + delta_lambda * sdf / (last_sdf - sdf);
}
depth = lambda;
}
break;
}
delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist;
lambda += delta_lambda;
last_sdf = sdf;
}
}
// Compute normal
const float3 pos_w = c_w + depth * ray_w;
const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w);
const float c = colorVol.GetUnitsTrilinearClamped(pos_w);
const float len_n_w = length(_n_w);
const float3 n_w = len_n_w > 0 ? _n_w / len_n_w : make_float3(0,0,1);
const float3 n_c = mulSO3inv(T_wc,n_w);
if(depth > 0 ) {
imgdepth(u,v) = depth;
img(u,v) = c;
norm(u,v) = make_float4(n_c, 1);
}else{
imgdepth(u,v) = 0.0f/0.0f;
img(u,v) = 0;
norm(u,v) = make_float4(0,0,0,0);
}
}
}
void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
dim3 blockDim, gridDim;
#if __CUDA_ARCH__ < 300
InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16);
#else
InitDimFromOutputImageOver(blockDim, gridDim, img);
#endif
hipLaunchKernelGGL(( KernRaycastSdf), dim3(gridDim),dim3(blockDim), 0, 0, depth, norm, img, vol, colorVol, T_wc, K, near, far, trunc_dist, subpix);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast box
//////////////////////////////////////////////////////
__global__ void KernRaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox )
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < imgd.w && v < imgd.h ) {
const float3 c_w = SE3Translation(T_wc);
const float3 ray_c = K.Unproject(u,v);
const float3 ray_w = mulSO3(T_wc, ray_c);
// Raycast bounding box to find valid ray segment of sdf
// http://www.cs.utah.edu/~awilliam/box/box.pdf
const float3 tminbound = (bbox.Min() - c_w) / ray_w;
const float3 tmaxbound = (bbox.Max() - c_w) / ray_w;
const float3 tmin = fminf(tminbound,tmaxbound);
const float3 tmax = fmaxf(tminbound,tmaxbound);
const float max_tmin = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z);
const float min_tmax = fminf(fminf(tmax.x, tmax.y), tmax.z);
float d;
// If ray intersects bounding box
if(max_tmin < min_tmax ) {
d = max_tmin;
}else{
d = 0.0f/0.0f;
}
imgd(u,v) = d;
}
}
void RaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox )
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, imgd);
hipLaunchKernelGGL(( KernRaycastBox), dim3(gridDim),dim3(blockDim), 0, 0, imgd, T_wc, K, bbox);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast sphere
//////////////////////////////////////////////////////
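// Analytic ray/sphere intersection: solves the quadratic |t*ray - center|^2 = r^2
// for the near root and keeps the hit only if it is closer than the depth already
// stored in imgd, so spheres can be composited over a previous raycast.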
__global__ void KernRaycastSphere(Image<float> imgd, Image<float> img, ImageIntrinsics K, float3 center_c, float r)
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < imgd.w && v < imgd.h ) {
const float3 ray_c = K.Unproject(u,v);
const float ldotc = dot(ray_c,center_c);
const float lsq = dot(ray_c,ray_c);
const float csq = dot(center_c,center_c);
float depth = (ldotc - sqrt(ldotc*ldotc - lsq*(csq - r*r) )) / lsq;
const float prev_depth = imgd(u,v);
if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) {
imgd(u,v) = depth;
if(img.ptr) {
const float3 p_c = depth * ray_c;
const float3 n_c = p_c - center_c;
img(u,v) = PhongShade(p_c, n_c / length(n_c));
}
}
}
}
void RaycastSphere(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, float3 center, float r)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, imgd);
const float3 center_c = mulSE3inv(T_wc, center);
hipLaunchKernelGGL(( KernRaycastSphere), dim3(gridDim),dim3(blockDim), 0, 0, imgd, img, K, center_c, r);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast plane
//////////////////////////////////////////////////////
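// Ray/plane intersection against a plane given in the camera frame as the vector
// n_c with dot(n_c, x) = -1 for points x on the plane; like the sphere kernel it
// only overwrites pixels where the plane is nearer than the existing depth.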
__global__ void KernRaycastPlane(Image<float> imgd, Image<float> img, ImageIntrinsics K, const float3 n_c)
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < img.w && v < img.h ) {
const float3 ray_c = K.Unproject(u,v);
const float depth = -1 / dot(n_c, ray_c);
const float prev_depth = imgd(u,v);
if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) {
const float3 p_c = depth * ray_c;
img(u,v) = PhongShade(p_c, n_c / length(n_c) );
imgd(u,v) = depth;
}
}
}
void RaycastPlane(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, const float3 n_w )
{
const float3 n_c = Plane_b_from_a(T_wc, n_w);
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, img);
hipLaunchKernelGGL(( KernRaycastPlane), dim3(gridDim),dim3(blockDim), 0, 0, imgd, img, K, n_c );
GpuCheckErrors();
}
}
| 4551bcbbcf973f9788cd0e9c97c724138250c42e.cu | #include "cu_raycast.h"
#include "MatUtils.h"
#include "launch_utils.h"
namespace roo
{
//////////////////////////////////////////////////////
// Phong shading.
//////////////////////////////////////////////////////
__host__ __device__ inline
float PhongShade(const float3 p_c, const float3 n_c)
{
const float ambient = 0.4;
const float diffuse = 0.4;
const float specular = 0.2;
const float3 eyedir = -1.0f * p_c / length(p_c);
const float3 _lightdir = make_float3(0.4,0.4,-1);
const float3 lightdir = _lightdir / length(_lightdir);
const float ldotn = dot(lightdir,n_c);
const float3 lightreflect = 2*ldotn*n_c + (-1.0) * lightdir;
const float edotr = fmaxf(0,dot(eyedir,lightreflect));
const float spec = edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr;
return ambient + diffuse * ldotn + specular * spec;
}
//////////////////////////////////////////////////////
// Raycast SDF
//////////////////////////////////////////////////////
__global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < img.w && v < img.h ) {
const float3 c_w = SE3Translation(T_wc);
const float3 ray_c = K.Unproject(u,v);
const float3 ray_w = mulSO3(T_wc, ray_c);
// Raycast bounding box to find valid ray segment of sdf
// http://www.cs.utah.edu/~awilliam/box/box.pdf
const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w;
const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w;
const float3 tmin = fminf(tminbound,tmaxbound);
const float3 tmax = fmaxf(tminbound,tmaxbound);
const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near);
const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far);
float depth = 0.0f;
// If ray intersects bounding box
if(max_tmin < min_tmax ) {
// Go between max_tmin and min_tmax
float lambda = max_tmin;
float last_sdf = 0.0f/0.0f;
float min_delta_lambda = vol.VoxelSizeUnits().x;
float delta_lambda = 0;
// March through space
while(lambda < min_tmax) {
const float3 pos_w = c_w + lambda * ray_w;
const float sdf = vol.GetUnitsTrilinearClamped(pos_w);
if( sdf <= 0 ) {
if( last_sdf > 0) {
// surface!
if(subpix) {
lambda = lambda + delta_lambda * sdf / (last_sdf - sdf);
}
depth = lambda;
}
break;
}
delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist;
lambda += delta_lambda;
last_sdf = sdf;
}
}
// Compute normal
const float3 pos_w = c_w + depth * ray_w;
const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w);
const float len_n_w = length(_n_w);
const float3 n_w = len_n_w > 0 ? _n_w / len_n_w : make_float3(0,0,1);
const float3 n_c = mulSO3inv(T_wc,n_w);
const float3 p_c = depth * ray_c;
if(depth > 0 ) {
// img(u,v) = (depth - near) / (far - near);
imgdepth(u,v) = depth;
img(u,v) = PhongShade(p_c, n_c);
// norm(u,v) = make_float4(0.5,0.5,0.5,1) + make_float4(n_c, 0) /2.0f;
norm(u,v) = make_float4(n_c, 1);
}else{
imgdepth(u,v) = 0.0f/0.0f;
img(u,v) = 0;
norm(u,v) = make_float4(0,0,0,0);
}
}
}
void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
dim3 blockDim, gridDim;
#if __CUDA_ARCH__ < 300
InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16);
#else
InitDimFromOutputImageOver(blockDim, gridDim, img);
#endif
KernRaycastSdf<<<gridDim,blockDim>>>(depth, norm, img, vol, T_wc, K, near, far, trunc_dist, subpix);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast Color SDF
//////////////////////////////////////////////////////
__global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < img.w && v < img.h ) {
const float3 c_w = SE3Translation(T_wc);
const float3 ray_c = K.Unproject(u,v);
const float3 ray_w = mulSO3(T_wc, ray_c);
// Raycast bounding box to find valid ray segment of sdf
// http://www.cs.utah.edu/~awilliam/box/box.pdf
const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w;
const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w;
const float3 tmin = fminf(tminbound,tmaxbound);
const float3 tmax = fmaxf(tminbound,tmaxbound);
const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near);
const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far);
float depth = 0.0f;
// If ray intersects bounding box
if(max_tmin < min_tmax ) {
// Go between max_tmin and min_tmax
float lambda = max_tmin;
float last_sdf = 0.0f/0.0f;
float min_delta_lambda = vol.VoxelSizeUnits().x;
float delta_lambda = 0;
// March through space
while(lambda < min_tmax) {
const float3 pos_w = c_w + lambda * ray_w;
const float sdf = vol.GetUnitsTrilinearClamped(pos_w);
if( sdf <= 0 ) {
if( last_sdf > 0) {
// surface!
if(subpix) {
lambda = lambda + delta_lambda * sdf / (last_sdf - sdf);
}
depth = lambda;
}
break;
}
delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist;
lambda += delta_lambda;
last_sdf = sdf;
}
}
// Compute normal
const float3 pos_w = c_w + depth * ray_w;
const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w);
const float c = colorVol.GetUnitsTrilinearClamped(pos_w);
const float len_n_w = length(_n_w);
const float3 n_w = len_n_w > 0 ? _n_w / len_n_w : make_float3(0,0,1);
const float3 n_c = mulSO3inv(T_wc,n_w);
if(depth > 0 ) {
imgdepth(u,v) = depth;
img(u,v) = c;
norm(u,v) = make_float4(n_c, 1);
}else{
imgdepth(u,v) = 0.0f/0.0f;
img(u,v) = 0;
norm(u,v) = make_float4(0,0,0,0);
}
}
}
void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix )
{
dim3 blockDim, gridDim;
#if __CUDA_ARCH__ < 300
InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16);
#else
InitDimFromOutputImageOver(blockDim, gridDim, img);
#endif
KernRaycastSdf<<<gridDim,blockDim>>>(depth, norm, img, vol, colorVol, T_wc, K, near, far, trunc_dist, subpix);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast box
//////////////////////////////////////////////////////
__global__ void KernRaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox )
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < imgd.w && v < imgd.h ) {
const float3 c_w = SE3Translation(T_wc);
const float3 ray_c = K.Unproject(u,v);
const float3 ray_w = mulSO3(T_wc, ray_c);
// Raycast bounding box to find valid ray segment of sdf
// http://www.cs.utah.edu/~awilliam/box/box.pdf
const float3 tminbound = (bbox.Min() - c_w) / ray_w;
const float3 tmaxbound = (bbox.Max() - c_w) / ray_w;
const float3 tmin = fminf(tminbound,tmaxbound);
const float3 tmax = fmaxf(tminbound,tmaxbound);
const float max_tmin = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z);
const float min_tmax = fminf(fminf(tmax.x, tmax.y), tmax.z);
float d;
// If ray intersects bounding box
if(max_tmin < min_tmax ) {
d = max_tmin;
}else{
d = 0.0f/0.0f;
}
imgd(u,v) = d;
}
}
void RaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox )
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, imgd);
KernRaycastBox<<<gridDim,blockDim>>>(imgd, T_wc, K, bbox);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast sphere
//////////////////////////////////////////////////////
__global__ void KernRaycastSphere(Image<float> imgd, Image<float> img, ImageIntrinsics K, float3 center_c, float r)
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < imgd.w && v < imgd.h ) {
const float3 ray_c = K.Unproject(u,v);
const float ldotc = dot(ray_c,center_c);
const float lsq = dot(ray_c,ray_c);
const float csq = dot(center_c,center_c);
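// Nearest ray/sphere intersection: solve |t*ray_c - center_c|^2 = r^2, i.e.
// lsq*t^2 - 2*ldotc*t + (csq - r*r) = 0, and take the smaller root.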
float depth = (ldotc - sqrt(ldotc*ldotc - lsq*(csq - r*r) )) / lsq;
const float prev_depth = imgd(u,v);
if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) {
imgd(u,v) = depth;
if(img.ptr) {
const float3 p_c = depth * ray_c;
const float3 n_c = p_c - center_c;
img(u,v) = PhongShade(p_c, n_c / length(n_c));
}
}
}
}
void RaycastSphere(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, float3 center, float r)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, imgd);
const float3 center_c = mulSE3inv(T_wc, center);
KernRaycastSphere<<<gridDim,blockDim>>>(imgd, img, K, center_c, r);
GpuCheckErrors();
}
//////////////////////////////////////////////////////
// Raycast plane
//////////////////////////////////////////////////////
__global__ void KernRaycastPlane(Image<float> imgd, Image<float> img, ImageIntrinsics K, const float3 n_c)
{
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
if( u < img.w && v < img.h ) {
const float3 ray_c = K.Unproject(u,v);
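// n_c defines the plane {x : dot(n_c, x) = -1} in camera coordinates, so the ray t*ray_c
// hits it at t = -1 / dot(n_c, ray_c).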
const float depth = -1 / dot(n_c, ray_c);
const float prev_depth = imgd(u,v);
if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) {
const float3 p_c = depth * ray_c;
img(u,v) = PhongShade(p_c, n_c / length(n_c) );
imgd(u,v) = depth;
}
}
}
void RaycastPlane(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, const float3 n_w )
{
const float3 n_c = Plane_b_from_a(T_wc, n_w);
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, img);
KernRaycastPlane<<<gridDim,blockDim>>>(imgd, img, K, n_c );
GpuCheckErrors();
}
}
|
8d747e31937112694bbfe5db4d37e805d9af679e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c) {
// Position 1: To write Code here later
int Ix, Iy, index;
int n = 16;
Ix = blockIdx.x * blockDim.x + threadIdx.x;
Iy = blockIdx.y * blockDim.y + threadIdx.y;
index = Ix * blockDim.x * gridDim.y + Iy * blockDim.y * gridDim.y ;
int stride = 1 ;
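// Note: this 2D-to-1D flattening is not unique (with blockDim = gridDim = (2,2) several
// threads get the same index), and with stride 1 each thread sweeps every element from its
// index to n, so the result is correct but the work is heavily duplicated.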
for (int i = index; i < n; i+=stride)
c[i] = a[i] + b[i];
}
int main()
{
int *a, *b, *c, *da, *db, *dc, N=16, i, j;
a = (int*)malloc(sizeof(int)*N); // allocate host mem
b = (int*)malloc(sizeof(int)*N); // and assign random
c = (int*)malloc(sizeof(int)*N); // memory
// Write code to initialize both a and b to 1s.
for (i = 0; i < N; i++) {
a[i] = b[i] = 1;
}
hipMalloc((void **)&da, sizeof(int)*N);
hipMalloc((void **)&db, sizeof(int)*N);
hipMalloc((void **)&dc, sizeof(int)*N);
hipMemcpy(da, a, sizeof(int)*N, hipMemcpyHostToDevice);
hipMemcpy(db, b, sizeof(int)*N, hipMemcpyHostToDevice);
dim3 dimGrid(N/8, N/8, 1);
dim3 dimBlock(N/8, N/8, 1);
hipLaunchKernelGGL(( add), dim3(dimGrid),dim3(dimBlock), 0, 0, da, db, dc);
hipMemcpy(c, dc, sizeof(int)*N, hipMemcpyDeviceToHost);
for (j = 0; j < N/4; j++) {
for (i = 0; i < N/4; i++) {
printf("a[%d] + b[%d] = %d\n", j*N/4+i, j*N/4+i, c[j*N/4+i]);
}
printf("\n");
}
printf("\n");
} | 8d747e31937112694bbfe5db4d37e805d9af679e.cu | #include <stdio.h>
#include <stdlib.h>
__global__ void add(int *a, int *b, int *c) {
// Position 1: To write Code here later
int Ix, Iy, index;
int n = 16;
Ix = blockIdx.x * blockDim.x + threadIdx.x;
Iy = blockIdx.y * blockDim.y + threadIdx.y;
index = Ix * blockDim.x * gridDim.y + Iy * blockDim.y * gridDim.y ;
int stride = 1 ;
for (int i = index; i < n; i+=stride)
c[i] = a[i] + b[i];
}
int main()
{
int *a, *b, *c, *da, *db, *dc, N=16, i, j;
a = (int*)malloc(sizeof(int)*N); // allocate host mem
b = (int*)malloc(sizeof(int)*N); // and assign random
c = (int*)malloc(sizeof(int)*N); // memory
// Write code to initialize both a and b to 1's.
for (i = 0; i < N; i++) {
a[i] = b[i] = 1;
}
cudaMalloc((void **)&da, sizeof(int)*N);
cudaMalloc((void **)&db, sizeof(int)*N);
cudaMalloc((void **)&dc, sizeof(int)*N);
cudaMemcpy(da, a, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(db, b, sizeof(int)*N, cudaMemcpyHostToDevice);
dim3 dimGrid(N/8, N/8, 1);
dim3 dimBlock(N/8, N/8, 1);
add<<<dimGrid,dimBlock>>>(da, db, dc);
cudaMemcpy(c, dc, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (j = 0; j < N/4; j++) {
for (i = 0; i < N/4; i++) {
printf("a[%d] + b[%d] = %d\n", j*N/4+i, j*N/4+i, c[j*N/4+i]);
}
printf("\n");
}
printf("\n");
} |
336c886d99d9d00f14ec15ca5624832d3b0e1d32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _WIN32
#define WINDOWS_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#endif
////////////////////////////////////////////////////////////////////////////////
// Includes
////////////////////////////////////////////////////////////////////////////////
//#include <GL/glew.h>
//#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <cuda_gl_interop.h>
#include "bucketsort.cuh"
// includes, kernels
#include "bucketsort_kernel.cu"
#include "histogram1024_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// Forward declarations
////////////////////////////////////////////////////////////////////////////////
void calcPivotPoints(float *histogram, int histosize, int listsize,
int divisions, float min, float max, float *pivotPoints,
float histo_width);
////////////////////////////////////////////////////////////////////////////////
// Globals
////////////////////////////////////////////////////////////////////////////////
const int histosize = 1024;
unsigned int *h_offsets = NULL;
unsigned int *d_offsets = NULL;
int *d_indice = NULL;
float *pivotPoints = NULL;
float *historesult = NULL;
float *l_pivotpoints = NULL;
unsigned int *d_prefixoffsets = NULL;
unsigned int *l_offsets = NULL;
////////////////////////////////////////////////////////////////////////////////
// Initialize the bucketsort algorithm
////////////////////////////////////////////////////////////////////////////////
void init_bucketsort(int listsize) {
h_offsets = (unsigned int *)malloc(histosize * sizeof(int));
checkCudaErrors(
hipMalloc((void **)&d_offsets, histosize * sizeof(unsigned int)));
pivotPoints = (float *)malloc(DIVISIONS * sizeof(float));
checkCudaErrors(hipMalloc((void **)&d_indice, listsize * sizeof(int)));
historesult = (float *)malloc(histosize * sizeof(float));
checkCudaErrors(
hipMalloc((void **)&l_pivotpoints, DIVISIONS * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&l_offsets, DIVISIONS * sizeof(int)));
int blocks = ((listsize - 1) / (BUCKET_THREAD_N * BUCKET_BAND)) + 1;
checkCudaErrors(hipMalloc((void **)&d_prefixoffsets,
blocks * BUCKET_BLOCK_MEMORY * sizeof(int)));
initHistogram1024();
}
////////////////////////////////////////////////////////////////////////////////
// Uninitialize the bucketsort algorithm
////////////////////////////////////////////////////////////////////////////////
void finish_bucketsort() {
checkCudaErrors(hipFree(d_indice));
checkCudaErrors(hipFree(d_offsets));
checkCudaErrors(hipFree(l_pivotpoints));
checkCudaErrors(hipFree(l_offsets));
free(pivotPoints);
free(h_offsets);
free(historesult);
checkCudaErrors(hipFree(d_prefixoffsets));
closeHistogram1024();
}
////////////////////////////////////////////////////////////////////////////////
// Given the input array of floats and the min and max of the distribution,
// sort the elements into float4 aligned buckets of roughly equal size
////////////////////////////////////////////////////////////////////////////////
void bucketSort(float *d_input, float *d_output, int listsize, int *sizes,
int *nullElements, float minimum, float maximum,
unsigned int *origOffsets) {
////////////////////////////////////////////////////////////////////////////
// First pass - Create 1024 bin histogram
////////////////////////////////////////////////////////////////////////////
checkCudaErrors(hipMemset((void *)d_offsets, 0, histosize * sizeof(int)));
histogram1024GPU(h_offsets, d_input, minimum, maximum, listsize);
for (int i = 0; i < histosize; i++)
historesult[i] = (float)h_offsets[i];
///////////////////////////////////////////////////////////////////////////
// Calculate pivot points (CPU algorithm)
///////////////////////////////////////////////////////////////////////////
calcPivotPoints(historesult, histosize, listsize, DIVISIONS, minimum,
maximum, pivotPoints,
(maximum - minimum) / (float)histosize);
///////////////////////////////////////////////////////////////////////////
// Count the bucket sizes in new divisions
///////////////////////////////////////////////////////////////////////////
checkCudaErrors(hipMemcpy(l_pivotpoints, pivotPoints,
(DIVISIONS) * sizeof(int),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemset((void *)d_offsets, 0, DIVISIONS * sizeof(int)));
checkCudaErrors(
hipBindTexture(0, texPivot, l_pivotpoints, DIVISIONS * sizeof(int)));
// Setup block and grid
dim3 threads(BUCKET_THREAD_N, 1);
int blocks = ((listsize - 1) / (threads.x * BUCKET_BAND)) + 1;
dim3 grid(blocks, 1);
// Find the new indice for all elements
hipLaunchKernelGGL(( bucketcount), dim3(grid), dim3(threads), 0, 0, d_input, d_indice, d_prefixoffsets,
listsize);
///////////////////////////////////////////////////////////////////////////
// Prefix scan offsets and align each division to float4 (required by
// mergesort)
///////////////////////////////////////////////////////////////////////////
#ifdef BUCKET_WG_SIZE_0
threads.x = BUCKET_WG_SIZE_0;
#else
threads.x = 128;
#endif
grid.x = DIVISIONS / threads.x;
hipLaunchKernelGGL(( bucketprefixoffset), dim3(grid), dim3(threads), 0, 0, d_prefixoffsets, d_offsets, blocks);
// copy the sizes from device to host
hipMemcpy(h_offsets, d_offsets, DIVISIONS * sizeof(int),
hipMemcpyDeviceToHost);
origOffsets[0] = 0;
for (int i = 0; i < DIVISIONS; i++) {
origOffsets[i + 1] = h_offsets[i] + origOffsets[i];
if ((h_offsets[i] % 4) != 0) {
nullElements[i] = (h_offsets[i] & ~3) + 4 - h_offsets[i];
} else
nullElements[i] = 0;
}
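// Convert the per-division counts into float4-aligned offsets: sizes[] holds the padded count
// in float4 units, each division is rounded up to a multiple of 4, the padded counts are turned
// into an inclusive prefix sum, and the final shift makes it an exclusive prefix sum
// (per-division start offsets).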
for (int i = 0; i < DIVISIONS; i++)
sizes[i] = (h_offsets[i] + nullElements[i]) / 4;
for (int i = 0; i < DIVISIONS; i++) {
if ((h_offsets[i] % 4) != 0)
h_offsets[i] = (h_offsets[i] & ~3) + 4;
}
for (int i = 1; i < DIVISIONS; i++)
h_offsets[i] = h_offsets[i - 1] + h_offsets[i];
for (int i = DIVISIONS - 1; i > 0; i--)
h_offsets[i] = h_offsets[i - 1];
h_offsets[0] = 0;
///////////////////////////////////////////////////////////////////////////
// Finally, sort the lot
///////////////////////////////////////////////////////////////////////////
hipMemcpy(l_offsets, h_offsets, (DIVISIONS) * sizeof(int),
hipMemcpyHostToDevice);
hipMemset(d_output, 0x0, (listsize + (DIVISIONS * 4)) * sizeof(float));
threads.x = BUCKET_THREAD_N;
blocks = ((listsize - 1) / (threads.x * BUCKET_BAND)) + 1;
grid.x = blocks;
hipLaunchKernelGGL(( bucketsort), dim3(grid), dim3(threads), 0, 0, d_input, d_indice, d_output, listsize,
d_prefixoffsets, l_offsets);
}
////////////////////////////////////////////////////////////////////////////////
// Given a histogram of the list, figure out suitable pivotpoints that divide
// the list into approximately listsize/divisions elements each
////////////////////////////////////////////////////////////////////////////////
void calcPivotPoints(float *histogram, int histosize, int listsize,
int divisions, float min, float max, float *pivotPoints,
float histo_width) {
float elemsPerSlice = listsize / (float)divisions;
float startsAt = min;
float endsAt = min + histo_width;
float we_need = elemsPerSlice;
int p_idx = 0;
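// Greedy scan: accumulate histogram counts and, each time roughly elemsPerSlice elements have
// been consumed, place a pivot inside the current bin by linear interpolation
// (we_need / histogram[i] of the bin width).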
for (int i = 0; i < histosize; i++) {
if (i == histosize - 1) {
if (!(p_idx < divisions)) {
pivotPoints[p_idx++] =
startsAt + (we_need / histogram[i]) * histo_width;
}
break;
}
while (histogram[i] > we_need) {
if (!(p_idx < divisions)) {
printf("i=%d, p_idx = %d, divisions = %d\n", i, p_idx,
divisions);
exit(0);
}
pivotPoints[p_idx++] =
startsAt + (we_need / histogram[i]) * histo_width;
startsAt += (we_need / histogram[i]) * histo_width;
histogram[i] -= we_need;
we_need = elemsPerSlice;
}
// grab what we can from what remains of it
we_need -= histogram[i];
startsAt = endsAt;
endsAt += histo_width;
}
while (p_idx < divisions) {
pivotPoints[p_idx] = pivotPoints[p_idx - 1];
p_idx++;
}
}
| 336c886d99d9d00f14ec15ca5624832d3b0e1d32.cu | #ifdef _WIN32
#define WINDOWS_LEAN_AND_MEAN
#define NOMINMAX
#include <windows.h>
#endif
////////////////////////////////////////////////////////////////////////////////
// Includes
////////////////////////////////////////////////////////////////////////////////
//#include <GL/glew.h>
//#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <helper_cuda.h>
#include <cuda_gl_interop.h>
#include "bucketsort.cuh"
// includes, kernels
#include "bucketsort_kernel.cu"
#include "histogram1024_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// Forward declarations
////////////////////////////////////////////////////////////////////////////////
void calcPivotPoints(float *histogram, int histosize, int listsize,
int divisions, float min, float max, float *pivotPoints,
float histo_width);
////////////////////////////////////////////////////////////////////////////////
// Globals
////////////////////////////////////////////////////////////////////////////////
const int histosize = 1024;
unsigned int *h_offsets = NULL;
unsigned int *d_offsets = NULL;
int *d_indice = NULL;
float *pivotPoints = NULL;
float *historesult = NULL;
float *l_pivotpoints = NULL;
unsigned int *d_prefixoffsets = NULL;
unsigned int *l_offsets = NULL;
////////////////////////////////////////////////////////////////////////////////
// Initialize the bucketsort algorithm
////////////////////////////////////////////////////////////////////////////////
void init_bucketsort(int listsize) {
h_offsets = (unsigned int *)malloc(histosize * sizeof(int));
checkCudaErrors(
cudaMalloc((void **)&d_offsets, histosize * sizeof(unsigned int)));
pivotPoints = (float *)malloc(DIVISIONS * sizeof(float));
checkCudaErrors(cudaMalloc((void **)&d_indice, listsize * sizeof(int)));
historesult = (float *)malloc(histosize * sizeof(float));
checkCudaErrors(
cudaMalloc((void **)&l_pivotpoints, DIVISIONS * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&l_offsets, DIVISIONS * sizeof(int)));
int blocks = ((listsize - 1) / (BUCKET_THREAD_N * BUCKET_BAND)) + 1;
checkCudaErrors(cudaMalloc((void **)&d_prefixoffsets,
blocks * BUCKET_BLOCK_MEMORY * sizeof(int)));
initHistogram1024();
}
////////////////////////////////////////////////////////////////////////////////
// Uninitialize the bucketsort algorithm
////////////////////////////////////////////////////////////////////////////////
void finish_bucketsort() {
checkCudaErrors(cudaFree(d_indice));
checkCudaErrors(cudaFree(d_offsets));
checkCudaErrors(cudaFree(l_pivotpoints));
checkCudaErrors(cudaFree(l_offsets));
free(pivotPoints);
free(h_offsets);
free(historesult);
checkCudaErrors(cudaFree(d_prefixoffsets));
closeHistogram1024();
}
////////////////////////////////////////////////////////////////////////////////
// Given the input array of floats and the min and max of the distribution,
// sort the elements into float4 aligned buckets of roughly equal size
////////////////////////////////////////////////////////////////////////////////
void bucketSort(float *d_input, float *d_output, int listsize, int *sizes,
int *nullElements, float minimum, float maximum,
unsigned int *origOffsets) {
////////////////////////////////////////////////////////////////////////////
// First pass - Create 1024 bin histogram
////////////////////////////////////////////////////////////////////////////
checkCudaErrors(cudaMemset((void *)d_offsets, 0, histosize * sizeof(int)));
histogram1024GPU(h_offsets, d_input, minimum, maximum, listsize);
for (int i = 0; i < histosize; i++)
historesult[i] = (float)h_offsets[i];
///////////////////////////////////////////////////////////////////////////
// Calculate pivot points (CPU algorithm)
///////////////////////////////////////////////////////////////////////////
calcPivotPoints(historesult, histosize, listsize, DIVISIONS, minimum,
maximum, pivotPoints,
(maximum - minimum) / (float)histosize);
///////////////////////////////////////////////////////////////////////////
// Count the bucket sizes in new divisions
///////////////////////////////////////////////////////////////////////////
checkCudaErrors(cudaMemcpy(l_pivotpoints, pivotPoints,
(DIVISIONS) * sizeof(int),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset((void *)d_offsets, 0, DIVISIONS * sizeof(int)));
checkCudaErrors(
cudaBindTexture(0, texPivot, l_pivotpoints, DIVISIONS * sizeof(int)));
// Setup block and grid
dim3 threads(BUCKET_THREAD_N, 1);
int blocks = ((listsize - 1) / (threads.x * BUCKET_BAND)) + 1;
dim3 grid(blocks, 1);
// Find the new indice for all elements
bucketcount<<<grid, threads>>>(d_input, d_indice, d_prefixoffsets,
listsize);
///////////////////////////////////////////////////////////////////////////
// Prefix scan offsets and align each division to float4 (required by
// mergesort)
///////////////////////////////////////////////////////////////////////////
#ifdef BUCKET_WG_SIZE_0
threads.x = BUCKET_WG_SIZE_0;
#else
threads.x = 128;
#endif
grid.x = DIVISIONS / threads.x;
bucketprefixoffset<<<grid, threads>>>(d_prefixoffsets, d_offsets, blocks);
// copy the sizes from device to host
cudaMemcpy(h_offsets, d_offsets, DIVISIONS * sizeof(int),
cudaMemcpyDeviceToHost);
origOffsets[0] = 0;
for (int i = 0; i < DIVISIONS; i++) {
origOffsets[i + 1] = h_offsets[i] + origOffsets[i];
if ((h_offsets[i] % 4) != 0) {
nullElements[i] = (h_offsets[i] & ~3) + 4 - h_offsets[i];
} else
nullElements[i] = 0;
}
for (int i = 0; i < DIVISIONS; i++)
sizes[i] = (h_offsets[i] + nullElements[i]) / 4;
for (int i = 0; i < DIVISIONS; i++) {
if ((h_offsets[i] % 4) != 0)
h_offsets[i] = (h_offsets[i] & ~3) + 4;
}
for (int i = 1; i < DIVISIONS; i++)
h_offsets[i] = h_offsets[i - 1] + h_offsets[i];
for (int i = DIVISIONS - 1; i > 0; i--)
h_offsets[i] = h_offsets[i - 1];
h_offsets[0] = 0;
///////////////////////////////////////////////////////////////////////////
// Finally, sort the lot
///////////////////////////////////////////////////////////////////////////
cudaMemcpy(l_offsets, h_offsets, (DIVISIONS) * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemset(d_output, 0x0, (listsize + (DIVISIONS * 4)) * sizeof(float));
threads.x = BUCKET_THREAD_N;
blocks = ((listsize - 1) / (threads.x * BUCKET_BAND)) + 1;
grid.x = blocks;
bucketsort<<<grid, threads>>>(d_input, d_indice, d_output, listsize,
d_prefixoffsets, l_offsets);
}
////////////////////////////////////////////////////////////////////////////////
// Given a histogram of the list, figure out suitable pivotpoints that divide
// the list into approximately listsize/divisions elements each
////////////////////////////////////////////////////////////////////////////////
void calcPivotPoints(float *histogram, int histosize, int listsize,
int divisions, float min, float max, float *pivotPoints,
float histo_width) {
float elemsPerSlice = listsize / (float)divisions;
float startsAt = min;
float endsAt = min + histo_width;
float we_need = elemsPerSlice;
int p_idx = 0;
for (int i = 0; i < histosize; i++) {
if (i == histosize - 1) {
if (!(p_idx < divisions)) {
pivotPoints[p_idx++] =
startsAt + (we_need / histogram[i]) * histo_width;
}
break;
}
while (histogram[i] > we_need) {
if (!(p_idx < divisions)) {
printf("i=%d, p_idx = %d, divisions = %d\n", i, p_idx,
divisions);
exit(0);
}
pivotPoints[p_idx++] =
startsAt + (we_need / histogram[i]) * histo_width;
startsAt += (we_need / histogram[i]) * histo_width;
histogram[i] -= we_need;
we_need = elemsPerSlice;
}
// grab what we can from what remains of it
we_need -= histogram[i];
startsAt = endsAt;
endsAt += histo_width;
}
while (p_idx < divisions) {
pivotPoints[p_idx] = pivotPoints[p_idx - 1];
p_idx++;
}
}
|
78e3ab9830080e06062a96dd0ed5d0d1277ead7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
/*
 * Aim to profile `saxpy` and, without modifying `N`, get it to run in
 * under 20us.
*/
__global__ void saxpy(int * a, int * b, int * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
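// Grid-stride loop: each thread handles elements tid, tid + stride, tid + 2*stride, ...
// so the kernel stays correct for any grid/block configuration.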
for ( int i = tid; i < N; i += stride ){
c[i] = 2 * a[i] + b[i];
}
}
inline hipError_t checkCuda(hipError_t result)
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);
int *a, *b, *c;
int size = N * sizeof (int); // The total number of bytes per vector
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
hipMemPrefetchAsync(a, size, hipCpuDeviceId);
hipMemPrefetchAsync(b, size, hipCpuDeviceId);
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
// Initialize memory
for( int i = 0; i < N; ++i )
{
a[i] = 2;
b[i] = 1;
c[i] = 0;
}
int threads_per_block = 256;
int number_of_blocks = 32 * numberOfSMs;
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
hipMemPrefetchAsync(c, size, deviceId);
hipLaunchKernelGGL(( saxpy) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c );
hipDeviceSynchronize();
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
// Print out the first and last 5 values of c for a quality check
for( int i = 0; i < 5; ++i )
printf("c[%d] = %d, ", i, c[i]);
printf ("\n");
for( int i = N-5; i < N; ++i )
printf("c[%d] = %d, ", i, c[i]);
printf ("\n");
hipFree( a ); hipFree( b ); hipFree( c );
}
| 78e3ab9830080e06062a96dd0ed5d0d1277ead7d.cu | #include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
/*
 * Aim to profile `saxpy` and, without modifying `N`, get it to run in
 * under 20us.
*/
__global__ void saxpy(int * a, int * b, int * c)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for ( int i = tid; i < N; i += stride ){
c[i] = 2 * a[i] + b[i];
}
}
inline cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);
int *a, *b, *c;
int size = N * sizeof (int); // The total number of bytes per vector
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
cudaMemPrefetchAsync(a, size, cudaCpuDeviceId);
cudaMemPrefetchAsync(b, size, cudaCpuDeviceId);
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
// Initialize memory
for( int i = 0; i < N; ++i )
{
a[i] = 2;
b[i] = 1;
c[i] = 0;
}
int threads_per_block = 256;
int number_of_blocks = 32 * numberOfSMs;
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);
saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );
cudaDeviceSynchronize();
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
// Print out the first and last 5 values of c for a quality check
for( int i = 0; i < 5; ++i )
printf("c[%d] = %d, ", i, c[i]);
printf ("\n");
for( int i = N-5; i < N; ++i )
printf("c[%d] = %d, ", i, c[i]);
printf ("\n");
cudaFree( a ); cudaFree( b ); cudaFree( c );
}
|
32e489f8525597a8cfa51e7a68eb8b21f3690073.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CSCI 563 Programming Assignment 2
Clayton Kramp
*/
#include <iostream>
#include <fstream>
using namespace std;
// Main Device Function to be used to count number of ones
__global__ void countOnes(int* A, int* count, int row, int col) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= row || j >= col) return;
if (A[i * col + j] == 1) {
// Atomic addition for race conditions
atomicAdd(count, 1);
}
}
int main(int argc, char* argv[]) {
if (argc != 2) {
cerr << "Arguments error" << endl;
return -1;
}
ifstream file(argv[1]);
if (!file.good()) {
cerr << "Bad input" << endl;
return -1;
}
int row, col;
file >> col >> row;
int** A = new int*[row];
A[0] = new int[row*col];
for (int i = 1; i < row; i++) A[i] = A[i-1] + col;
// Fill in Host Array A
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
int element;
file >> element;
A[i][j] = element;
}
}
file.close();
int* count = new int;
*count = 0;
// Copy memory to device array deviceA
int* deviceA;
int bytes = row * col * sizeof(int);
hipMalloc(&deviceA, bytes);
hipMemcpy(deviceA, A[0], bytes, hipMemcpyHostToDevice);
// Copy deviceCount
int* deviceCount;
hipMalloc(&deviceCount, 4);
hipMemcpy(deviceCount, count, 4, hipMemcpyHostToDevice);
dim3 threadsPerBlock(8, 8, 1);
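// Ceiling division so the grid covers every column and row even when they are not
// multiples of the 8x8 block.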
dim3 numBlocks((col + threadsPerBlock.x-1) / threadsPerBlock.x,
(row + threadsPerBlock.y-1) / threadsPerBlock.y, 1);
// Launch the program
hipLaunchKernelGGL(( countOnes), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, deviceA, deviceCount, row, col);
//hipDeviceSynchronize();
// Copy back from device the deviceCount
hipMemcpy(count, deviceCount, 4, hipMemcpyDeviceToHost);
cout << *count << endl;
delete[] A[0];
delete[] A;
hipFree(deviceA);
hipFree(deviceCount);
return 0;
}
| 32e489f8525597a8cfa51e7a68eb8b21f3690073.cu | /* CSCI 563 Programming Assignment 2
Clayton Kramp
*/
#include <iostream>
#include <fstream>
using namespace std;
// Main Device Function to be used to count number of ones
__global__ void countOnes(int* A, int* count, int row, int col) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= row || j >= col) return;
if (A[i * col + j] == 1) {
// Atomic addition for race conditions
atomicAdd(count, 1);
}
}
int main(int argc, char* argv[]) {
if (argc != 2) {
cerr << "Arguments error" << endl;
return -1;
}
ifstream file(argv[1]);
if (!file.good()) {
cerr << "Bad input" << endl;
return -1;
}
int row, col;
file >> col >> row;
int** A = new int*[row];
A[0] = new int[row*col];
for (int i = 1; i < row; i++) A[i] = A[i-1] + col;
// Fill in Host Array A
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
int element;
file >> element;
A[i][j] = element;
}
}
file.close();
int* count = new int;
*count = 0;
// Copy memory to device array deviceA
int* deviceA;
int bytes = row * col * sizeof(int);
cudaMalloc(&deviceA, bytes);
cudaMemcpy(deviceA, A[0], bytes, cudaMemcpyHostToDevice);
// Copy deviceCount
int* deviceCount;
cudaMalloc(&deviceCount, 4);
cudaMemcpy(deviceCount, count, 4, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(8, 8, 1);
dim3 numBlocks((col + threadsPerBlock.x-1) / threadsPerBlock.x,
(row + threadsPerBlock.y-1) / threadsPerBlock.y, 1);
// Launch the program
countOnes<<<numBlocks, threadsPerBlock>>>(deviceA, deviceCount, row, col);
//cudaDeviceSynchronize();
// Copy back from device the deviceCount
cudaMemcpy(count, deviceCount, 4, cudaMemcpyDeviceToHost);
cout << *count << endl;
delete[] A[0];
delete[] A;
cudaFree(deviceA);
cudaFree(deviceCount);
return 0;
}
|
f832a981764c2c05201499e43f9f5e12b149de5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <Windows.h>
#include <stdio.h>
#include "timer.h"
const int GpuThreadCount = 256;
const int MAXBLOCKS = 65535;
//const int testSize = 1073741824;
//const int testSize = 268435456;
//const int testSize = 134217728;
//const int testSize = 67108864;
const int testSize = 33554432;
//const int testSize = 16777216;
//const int testSize = 4200000;
//const int testSize = 4194304;
//const int testSize = 65536*2;
int calcBlockSize(int dataSize, int threadCount)
{
int retval = (dataSize/threadCount);
if ((dataSize % threadCount) != 0)
{
retval++;
}
if (retval > MAXBLOCKS)
retval = MAXBLOCKS;
return retval;
}
struct collatzResult
{
collatzResult()
{
sequenceStart = 0;
numberOfSteps = 0;
}
static collatzResult Reduce(collatzResult *subResults, int size)
{
collatzResult result;
int greatestNumberOfSteps = 0;
for(int i = 0; i < size; i++)
{
if (subResults[i].numberOfSteps > greatestNumberOfSteps)
{
result = subResults[i];
greatestNumberOfSteps = result.numberOfSteps;
}
}
return result;
}
public:
int sequenceStart;
int numberOfSteps;
};
struct gpu_collatzResult
{
int sequenceStart;
int numberOfSteps;
};
int cpu_calcCollatzNumber(int sequenceStart)
{
int count = 0;
long long current = sequenceStart;
while(current != 1)
{
if (current & 1)
{
current = current * 3 + 1;
}
else
{
current = current / 2;
}
count++;
if (current < sequenceStart)
break;
}
return count;
}
collatzResult cpu_calcCollatzNumbers(int size)
{
collatzResult result;
int sequenceStart = 1;
for(int i = 1; i < size; i++)
{
int steps = cpu_calcCollatzNumber(sequenceStart);
if (steps < 0)
{
throw 0;
}
if (steps > result.numberOfSteps)
{
result.numberOfSteps = steps;
result.sequenceStart = sequenceStart;
}
sequenceStart += 2;
}
return result;
}
bool cpu_verifyResult(collatzResult result)
{
int steps = cpu_calcCollatzNumber(result.sequenceStart);
return (steps == result.numberOfSteps);
}
__device__ void reduce(gpu_collatzResult *blockResults)
{
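// Block-wide reduction keeping the entry with the largest numberOfSteps in blockResults[0].
// If blockDim.x is odd it is first rounded up to the next power of two (bit-smearing trick),
// and the (threadIdx.x + i) < blockDim.x guard keeps out-of-range upper slots out of the
// comparison.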
int i = blockDim.x;
if ((i % 2) != 0)
{
i--;
i |= (i >> 1);
i |= (i >> 2);
i |= (i >> 4);
i |= (i >> 8);
i |= (i >> 16);
i++;
}
while(i >= 1)
{
if ((threadIdx.x < i) &&
(threadIdx.x + i) < blockDim.x)
{
if (blockResults[threadIdx.x].numberOfSteps < blockResults[threadIdx.x + i].numberOfSteps)
{
blockResults[threadIdx.x].numberOfSteps = blockResults[threadIdx.x + i].numberOfSteps;
blockResults[threadIdx.x].sequenceStart = blockResults[threadIdx.x + i].sequenceStart;
}
}
__syncthreads();
i = i >> 1;
}
}
__global__ void collatzKernel(collatzResult *results)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ gpu_collatzResult blockResults[GpuThreadCount];
blockResults[threadIdx.x].numberOfSteps = 0;
blockResults[threadIdx.x].sequenceStart = 0;
while (i < testSize)
{
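// Only odd starting values are tested (start = 2*i + 1); the walk below stops as soon as
// the trajectory drops below its starting value, so 'count' is the number of steps until
// the sequence first falls below sequenceStart, not the full step count down to 1.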
int sequenceStart = i*2 + 1;
int count = 0;
long long current = sequenceStart;
while(current != 1)
{
if (current & 1)
{
current = current * 3 + 1;
}
else
{
current = current / 2;
}
count++;
if (current < sequenceStart)
break;
}
if (count > blockResults[threadIdx.x].numberOfSteps)
{
blockResults[threadIdx.x].numberOfSteps = count;
blockResults[threadIdx.x].sequenceStart = sequenceStart;
}
i += blockDim.x * gridDim.x;
}
__syncthreads();
#if 1
reduce(blockResults);
#else
i = blockDim.x;
if ((i % 2) != 0)
{
i--;
i |= (i >> 1);
i |= (i >> 2);
i |= (i >> 4);
i |= (i >> 8);
i |= (i >> 16);
i++;
}
while(i >= 1)
{
if ((threadIdx.x < i) &&
(threadIdx.x + i) < blockDim.x)
{
if (blockResults[threadIdx.x].numberOfSteps < blockResults[threadIdx.x + i].numberOfSteps)
{
blockResults[threadIdx.x].numberOfSteps = blockResults[threadIdx.x + i].numberOfSteps;
blockResults[threadIdx.x].sequenceStart = blockResults[threadIdx.x + i].sequenceStart;
}
}
__syncthreads();
i = i >> 1;
}
#endif
if (threadIdx.x == 0)
{
results[blockIdx.x].numberOfSteps = blockResults[0].numberOfSteps;
results[blockIdx.x].sequenceStart = blockResults[0].sequenceStart;
}
}
collatzResult gpu_calcCollatzNumbers(int size, int threadCount)
{
collatzResult result;
hipError_t cudaStatus;
int blocks = calcBlockSize(size, threadCount);
fprintf(stdout, "Threads %d Blocks %d\n", threadCount, blocks);
collatzResult *results = new collatzResult[blocks];
collatzResult *dev_results = 0;
// Allocate GPU buffer
int allocsize = blocks * sizeof(collatzResult);
cudaStatus = hipMalloc((void**)&dev_results, allocsize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_results, results, allocsize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipLaunchKernelGGL(( collatzKernel), dim3(blocks), dim3(threadCount), 0, 0, dev_results);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy resultsfrom GPU buffer to host memory.
cudaStatus = hipMemcpy(results, dev_results, allocsize, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
result = collatzResult::Reduce(results, blocks);
Error:
delete[] results;
hipFree(dev_results);
return result;
}
collatzResult cpu_Test1(int size)
{
Timer t;
t.Start();
collatzResult result = cpu_calcCollatzNumbers(size);
t.Stop();
fprintf(stdout, "CPUTest1 Result: Start %d Steps %d\n", result.sequenceStart, result.numberOfSteps);
fprintf(stdout, "CPUTest1: Total %fms elements per ms: %f\n", t.Elapsed(), testSize / t.Elapsed());
return result;
}
collatzResult gpu_Test(int size, int threadCount)
{
float cudaTime;
hipEvent_t startGpu, stopGpu;
collatzResult result;
const int iterations = 1;
hipError_t cudaStatus;
float maxRate;
#if 0
collatzResult cpuResult = cpu_Test1(testSize);
fprintf(stdout, "CPUResult Start %d Steps %d\n", cpuResult.sequenceStart, cpuResult.numberOfSteps);
#endif
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
maxRate = 0.0f;
for(int threads = 15; threads <= threadCount; threads++)
{
hipEventCreate(&stopGpu);
hipEventCreate(&startGpu);
hipEventRecord(startGpu, 0);
for(int i = 0; i < iterations; i++)
{
result = gpu_calcCollatzNumbers(testSize, threads);
}
hipEventRecord(stopGpu, 0);
hipEventSynchronize(stopGpu);
hipEventElapsedTime(&cudaTime, startGpu, stopGpu);
// fprintf(stdout, "GPUTest1 Result: Start %d Steps %d\n", result.sequenceStart, result.numberOfSteps);
float rate = size * iterations / cudaTime;
if (rate > maxRate)
{
fprintf(stdout, "GPUTest1: Total %fms elements per ms: %f\n",cudaTime, rate);
maxRate = rate;
}
if (!cpu_verifyResult(result))
{
fprintf(stderr, "Result Invalid!!\n");
}
}
Error:
return result;
}
int main()
{
fprintf(stdout, "CollatzTest Size=%ld\n", testSize);
// collatzResult result = cpu_Test1(testSize);
collatzResult gpuResult = gpu_Test(testSize, GpuThreadCount);
return 0;
} | f832a981764c2c05201499e43f9f5e12b149de5c.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <Windows.h>
#include <stdio.h>
#include "timer.h"
const int GpuThreadCount = 256;
const int MAXBLOCKS = 65535;
//const int testSize = 1073741824;
//const int testSize = 268435456;
//const int testSize = 134217728;
//const int testSize = 67108864;
const int testSize = 33554432;
//const int testSize = 16777216;
//const int testSize = 4200000;
//const int testSize = 4194304;
//const int testSize = 65536*2;
int calcBlockSize(int dataSize, int threadCount)
{
int retval = (dataSize/threadCount);
if ((dataSize % threadCount) != 0)
{
retval++;
}
if (retval > MAXBLOCKS)
retval = MAXBLOCKS;
return retval;
}
struct collatzResult
{
collatzResult()
{
sequenceStart = 0;
numberOfSteps = 0;
}
static collatzResult Reduce(collatzResult *subResults, int size)
{
collatzResult result;
int greatestNumberOfSteps = 0;
for(int i = 0; i < size; i++)
{
if (subResults[i].numberOfSteps > greatestNumberOfSteps)
{
result = subResults[i];
greatestNumberOfSteps = result.numberOfSteps;
}
}
return result;
}
public:
int sequenceStart;
int numberOfSteps;
};
struct gpu_collatzResult
{
int sequenceStart;
int numberOfSteps;
};
int cpu_calcCollatzNumber(int sequenceStart)
{
int count = 0;
long long current = sequenceStart;
while(current != 1)
{
if (current & 1)
{
current = current * 3 + 1;
}
else
{
current = current / 2;
}
count++;
if (current < sequenceStart)
break;
}
return count;
}
collatzResult cpu_calcCollatzNumbers(int size)
{
collatzResult result;
int sequenceStart = 1;
for(int i = 1; i < size; i++)
{
int steps = cpu_calcCollatzNumber(sequenceStart);
if (steps < 0)
{
throw 0;
}
if (steps > result.numberOfSteps)
{
result.numberOfSteps = steps;
result.sequenceStart = sequenceStart;
}
sequenceStart += 2;
}
return result;
}
bool cpu_verifyResult(collatzResult result)
{
int steps = cpu_calcCollatzNumber(result.sequenceStart);
return (steps == result.numberOfSteps);
}
__device__ void reduce(gpu_collatzResult *blockResults)
{
int i = blockDim.x;
if ((i % 2) != 0)
{
i--;
i |= (i >> 1);
i |= (i >> 2);
i |= (i >> 4);
i |= (i >> 8);
i |= (i >> 16);
i++;
}
while(i >= 1)
{
if ((threadIdx.x < i) &&
(threadIdx.x + i) < blockDim.x)
{
if (blockResults[threadIdx.x].numberOfSteps < blockResults[threadIdx.x + i].numberOfSteps)
{
blockResults[threadIdx.x].numberOfSteps = blockResults[threadIdx.x + i].numberOfSteps;
blockResults[threadIdx.x].sequenceStart = blockResults[threadIdx.x + i].sequenceStart;
}
}
__syncthreads();
i = i >> 1;
}
}
__global__ void collatzKernel(collatzResult *results)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ gpu_collatzResult blockResults[GpuThreadCount];
blockResults[threadIdx.x].numberOfSteps = 0;
blockResults[threadIdx.x].sequenceStart = 0;
while (i < testSize)
{
int sequenceStart = i*2 + 1;
int count = 0;
long long current = sequenceStart;
while(current != 1)
{
if (current & 1)
{
current = current * 3 + 1;
}
else
{
current = current / 2;
}
count++;
if (current < sequenceStart)
break;
}
if (count > blockResults[threadIdx.x].numberOfSteps)
{
blockResults[threadIdx.x].numberOfSteps = count;
blockResults[threadIdx.x].sequenceStart = sequenceStart;
}
i += blockDim.x * gridDim.x;
}
__syncthreads();
#if 1
reduce(blockResults);
#else
i = blockDim.x;
if ((i % 2) != 0)
{
i--;
i |= (i >> 1);
i |= (i >> 2);
i |= (i >> 4);
i |= (i >> 8);
i |= (i >> 16);
i++;
}
while(i >= 1)
{
if ((threadIdx.x < i) &&
(threadIdx.x + i) < blockDim.x)
{
if (blockResults[threadIdx.x].numberOfSteps < blockResults[threadIdx.x + i].numberOfSteps)
{
blockResults[threadIdx.x].numberOfSteps = blockResults[threadIdx.x + i].numberOfSteps;
blockResults[threadIdx.x].sequenceStart = blockResults[threadIdx.x + i].sequenceStart;
}
}
__syncthreads();
i = i >> 1;
}
#endif
if (threadIdx.x == 0)
{
results[blockIdx.x].numberOfSteps = blockResults[0].numberOfSteps;
results[blockIdx.x].sequenceStart = blockResults[0].sequenceStart;
}
}
collatzResult gpu_calcCollatzNumbers(int size, int threadCount)
{
collatzResult result;
cudaError_t cudaStatus;
int blocks = calcBlockSize(size, threadCount);
fprintf(stdout, "Threads %d Blocks %d\n", threadCount, blocks);
collatzResult *results = new collatzResult[blocks];
collatzResult *dev_results = 0;
// Allocate GPU buffer
int allocsize = blocks * sizeof(collatzResult);
cudaStatus = cudaMalloc((void**)&dev_results, allocsize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_results, results, allocsize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
collatzKernel<<<blocks, threadCount>>>(dev_results);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy resultsfrom GPU buffer to host memory.
cudaStatus = cudaMemcpy(results, dev_results, allocsize, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
result = collatzResult::Reduce(results, blocks);
Error:
delete[] results;
cudaFree(dev_results);
return result;
}
collatzResult cpu_Test1(int size)
{
Timer t;
t.Start();
collatzResult result = cpu_calcCollatzNumbers(size);
t.Stop();
fprintf(stdout, "CPUTest1 Result: Start %d Steps %d\n", result.sequenceStart, result.numberOfSteps);
fprintf(stdout, "CPUTest1: Total %fms elements per ms: %f\n", t.Elapsed(), testSize / t.Elapsed());
return result;
}
collatzResult gpu_Test(int size, int threadCount)
{
float cudaTime;
cudaEvent_t startGpu, stopGpu;
collatzResult result;
const int iterations = 1;
cudaError_t cudaStatus;
float maxRate;
#if 0
collatzResult cpuResult = cpu_Test1(testSize);
fprintf(stdout, "CPUResult Start %d Steps %d\n", cpuResult.sequenceStart, cpuResult.numberOfSteps);
#endif
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
maxRate = 0.0f;
for(int threads = 15; threads <= threadCount; threads++)
{
cudaEventCreate(&stopGpu);
cudaEventCreate(&startGpu);
cudaEventRecord(startGpu, 0);
for(int i = 0; i < iterations; i++)
{
result = gpu_calcCollatzNumbers(testSize, threads);
}
cudaEventRecord(stopGpu, 0);
cudaEventSynchronize(stopGpu);
cudaEventElapsedTime(&cudaTime, startGpu, stopGpu);
// fprintf(stdout, "GPUTest1 Result: Start %d Steps %d\n", result.sequenceStart, result.numberOfSteps);
float rate = size * iterations / cudaTime;
if (rate > maxRate)
{
fprintf(stdout, "GPUTest1: Total %fms elements per ms: %f\n",cudaTime, rate);
maxRate = rate;
}
if (!cpu_verifyResult(result))
{
fprintf(stderr, "Result Invalid!!\n");
}
}
Error:
return result;
}
int main()
{
fprintf(stdout, "CollatzTest Size=%ld\n", testSize);
// collatzResult result = cpu_Test1(testSize);
collatzResult gpuResult = gpu_Test(testSize, GpuThreadCount);
return 0;
} |
30323325148608560cb4c7102bfb2e168581709e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _ORDERGRAPH_KERNEL_H_
#define _ORDERGRAPH_KERNEL_H_
#include "data50.cu"
#include <stdio.h>
;
char name[20] = "50.out";
__device__ void Dincr(int *bit, int n);
__device__ void DincrS(int *bit, int n);
__device__ bool D_getState(int parN, int *sta, int time);
__device__ void D_findComb(int *comb, int l, int n);
__device__ int D_findindex(int *arr, int size);
__device__ int D_C(int n, int a);
__global__ void genScoreKernel(int sizepernode, float *D_localscore,
int *D_data, float *D_LG) {
int id = blockIdx.x * 256 + threadIdx.x;
int node, index;
bool flag;
int parent[5] = {0};
int pre[NODE_N] = {0};
int state[5] = {0};
int i, j, parN = 0, tmp, t;
int t1 = 0, t2 = 0;
float ls = 0;
int Nij[STATE_N] = {0};
if (id < sizepernode) {
D_findComb(parent, id, NODE_N - 1);
for (i = 0; i < 4; i++) {
if (parent[i] > 0)
parN++;
}
for (node = 0; node < NODE_N; node++) {
j = 1;
for (i = 0; i < NODE_N; i++) {
if (i != node)
pre[j++] = i;
}
for (tmp = 0; tmp < parN; tmp++)
state[tmp] = 0;
index = sizepernode * node + id;
// priors
/*
for(tmp=1;tmp<=4;tmp++){
localscore[index]+=100*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5);
}
*/
t = 0;
while (D_getState(parN, state, t++)) { // for get state
// printf("test %u\n",id);
ls = 0;
for (tmp = 0; tmp < STATE_N; tmp++)
Nij[tmp] = 0;
for (t1 = 0; t1 < DATA_N; t1++) {
flag = true;
for (t2 = 0; t2 < parN; t2++) {
if (D_data[t1 * NODE_N + pre[parent[t2]]] != state[t2]) {
flag = false;
break;
}
}
if (!flag)
continue;
Nij[D_data[t1 * NODE_N + node]]++;
}
tmp = STATE_N - 1;
for (t1 = 0; t1 < STATE_N; t1++) {
ls += D_LG[Nij[t1]];
tmp += Nij[t1];
}
ls -= D_LG[tmp];
ls += D_LG[STATE_N - 1];
D_localscore[index] += ls;
}
}
}
}
__global__ void computeKernel(int taskperthr, int sizepernode,
float *D_localscore, bool *D_parent, int node,
int total, float *D_Score, int *D_resP) {
extern __shared__ float lsinblock[];
const unsigned int id = blockIdx.x * 256 + threadIdx.x;
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
int posN = 1, i, index, t, tmp;
int pre[NODE_N] = {0};
int parN = 0;
int bestparent[4] = {0}, parent[5] = {-1};
float bestls = -999999999999999, ls;
for (i = 0; i < NODE_N; i++) {
if (D_parent[i] == 1) {
pre[posN++] = i;
}
}
for (i = 0; i < taskperthr && ((id * taskperthr + i) < total); i++) {
D_findComb(parent, id * taskperthr + i, posN);
for (parN = 0; parN < 4; parN++) {
if (parent[parN] < 0)
break;
if (pre[parent[parN]] > node)
parent[parN] = pre[parent[parN]];
else
parent[parN] = pre[parent[parN]] + 1;
}
for (tmp = parN; tmp > 0; tmp--) {
parent[tmp] = parent[tmp - 1];
}
parent[0] = 0;
index = D_findindex(parent, parN);
index += sizepernode * node;
ls = D_localscore[index];
if (ls > bestls) {
bestls = ls;
for (tmp = 0; tmp < 4; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
lsinblock[tid] = bestls;
__syncthreads();
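// Pairwise reduction over the shared scores: the larger (better) log-score of each pair is kept
// in the lower slot, and the losing slot is stamped with the float-encoded index of the slot
// that held the winning value. Thread 0 then chases these stamped indices to recover the winning
// thread id, publishes it via lsinblock[0], and that thread writes its best parent set to D_resP.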
for (i = 128; i >= 1; i /= 2) {
if (tid < i) {
if (lsinblock[tid + i] > lsinblock[tid] && lsinblock[tid + i] < 0) {
lsinblock[tid] = lsinblock[tid + i];
lsinblock[tid + i] = (float)(tid + i);
} else if (lsinblock[tid + i] < lsinblock[tid] && lsinblock[tid] < 0) {
lsinblock[tid + i] = (float)tid;
} else if (lsinblock[tid] > 0 && lsinblock[tid + i] < 0) {
lsinblock[tid] = lsinblock[tid + i];
lsinblock[tid + i] = (float)(tid + i);
} else if (lsinblock[tid] < 0 && lsinblock[tid + i] > 0) {
lsinblock[tid + i] = (float)tid;
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
D_Score[bid] = lsinblock[0];
t = 0;
for (i = 0; i < 7 && t < 128 && t >= 0; i++) {
t = (int)lsinblock[(int)powf(2.0, i) + t];
}
lsinblock[0] = (float)t;
}
__syncthreads();
if (tid == (int)lsinblock[0]) {
for (i = 0; i < 4; i++) {
D_resP[bid * 4 + i] = bestparent[i];
}
}
}
__device__ void Dincr(int *bit, int n) {
while (n <= NODE_N) {
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
n++;
} else {
break;
}
}
return;
}
__device__ void DincrS(int *bit, int n) {
bit[n]++;
if (bit[n] >= STATE_N) {
bit[n] = 0;
Dincr(bit, n + 1);
}
return;
}
__device__ bool D_getState(int parN, int *sta, int time) {
int i, j = 1;
for (i = 0; i < parN; i++) {
j *= STATE_N;
}
j--;
if (time > j)
return false;
if (time >= 1)
DincrS(sta, 0);
return true;
}
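// D_findComb appears to decode the linear index l into the corresponding combination of parent
// positions chosen from n candidates (combination size inferred from l); unused slots of comb[]
// are set to -1.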
__device__ void D_findComb(int *comb, int l, int n) {
const int len = 4;
if (l == 0) {
for (int i = 0; i < len; i++)
comb[i] = -1;
return;
}
int sum = 0;
int k = 1;
while (sum < l)
sum += D_C(n, k++);
l -= sum - D_C(n, --k);
int low = 0;
int pos = 0;
while (k > 1) {
sum = 0;
int s = 1;
while (sum < l)
sum += D_C(n - s++, k - 1);
l -= sum - D_C(n - (--s), --k);
low += s;
comb[pos++] = low;
n -= s;
}
comb[pos] = low + l;
for (int i = pos + 1; i < 4; i++)
comb[i] = -1;
}
__device__ int D_findindex(int *arr,
int size) { // reminder: arr[0] has to be 0 && size
// == array size-1 && index start from 0
int i, j, index = 0;
for (i = 1; i < size; i++) {
index += D_C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += D_C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
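// D_C(n, a) evaluates the binomial coefficient C(n, a) as a falling factorial divided stepwise by a!.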
__device__ int D_C(int n, int a) {
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
#endif
| 30323325148608560cb4c7102bfb2e168581709e.cu | #ifndef _ORDERGRAPH_KERNEL_H_
#define _ORDERGRAPH_KERNEL_H_
#include "data50.cu"
#include <stdio.h>
;
char name[20] = "50.out";
__device__ void Dincr(int *bit, int n);
__device__ void DincrS(int *bit, int n);
__device__ bool D_getState(int parN, int *sta, int time);
__device__ void D_findComb(int *comb, int l, int n);
__device__ int D_findindex(int *arr, int size);
__device__ int D_C(int n, int a);
__global__ void genScoreKernel(int sizepernode, float *D_localscore,
int *D_data, float *D_LG) {
int id = blockIdx.x * 256 + threadIdx.x;
int node, index;
bool flag;
int parent[5] = {0};
int pre[NODE_N] = {0};
int state[5] = {0};
int i, j, parN = 0, tmp, t;
int t1 = 0, t2 = 0;
float ls = 0;
int Nij[STATE_N] = {0};
if (id < sizepernode) {
D_findComb(parent, id, NODE_N - 1);
for (i = 0; i < 4; i++) {
if (parent[i] > 0)
parN++;
}
for (node = 0; node < NODE_N; node++) {
j = 1;
for (i = 0; i < NODE_N; i++) {
if (i != node)
pre[j++] = i;
}
for (tmp = 0; tmp < parN; tmp++)
state[tmp] = 0;
index = sizepernode * node + id;
// priors
/*
for(tmp=1;tmp<=4;tmp++){
localscore[index]+=100*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5);
}
*/
t = 0;
while (D_getState(parN, state, t++)) { // for get state
// printf("test %u\n",id);
ls = 0;
for (tmp = 0; tmp < STATE_N; tmp++)
Nij[tmp] = 0;
for (t1 = 0; t1 < DATA_N; t1++) {
flag = true;
for (t2 = 0; t2 < parN; t2++) {
if (D_data[t1 * NODE_N + pre[parent[t2]]] != state[t2]) {
flag = false;
break;
}
}
if (!flag)
continue;
Nij[D_data[t1 * NODE_N + node]]++;
}
tmp = STATE_N - 1;
for (t1 = 0; t1 < STATE_N; t1++) {
ls += D_LG[Nij[t1]];
tmp += Nij[t1];
}
ls -= D_LG[tmp];
ls += D_LG[STATE_N - 1];
D_localscore[index] += ls;
}
}
}
}
__global__ void computeKernel(int taskperthr, int sizepernode,
float *D_localscore, bool *D_parent, int node,
int total, float *D_Score, int *D_resP) {
extern __shared__ float lsinblock[];
const unsigned int id = blockIdx.x * 256 + threadIdx.x;
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
int posN = 1, i, index, t, tmp;
int pre[NODE_N] = {0};
int parN = 0;
int bestparent[4] = {0}, parent[5] = {-1};
float bestls = -999999999999999, ls;
for (i = 0; i < NODE_N; i++) {
if (D_parent[i] == 1) {
pre[posN++] = i;
}
}
for (i = 0; i < taskperthr && ((id * taskperthr + i) < total); i++) {
D_findComb(parent, id * taskperthr + i, posN);
for (parN = 0; parN < 4; parN++) {
if (parent[parN] < 0)
break;
if (pre[parent[parN]] > node)
parent[parN] = pre[parent[parN]];
else
parent[parN] = pre[parent[parN]] + 1;
}
for (tmp = parN; tmp > 0; tmp--) {
parent[tmp] = parent[tmp - 1];
}
parent[0] = 0;
index = D_findindex(parent, parN);
index += sizepernode * node;
ls = D_localscore[index];
if (ls > bestls) {
bestls = ls;
for (tmp = 0; tmp < 4; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
lsinblock[tid] = bestls;
__syncthreads();
for (i = 128; i >= 1; i /= 2) {
if (tid < i) {
if (lsinblock[tid + i] > lsinblock[tid] && lsinblock[tid + i] < 0) {
lsinblock[tid] = lsinblock[tid + i];
lsinblock[tid + i] = (float)(tid + i);
} else if (lsinblock[tid + i] < lsinblock[tid] && lsinblock[tid] < 0) {
lsinblock[tid + i] = (float)tid;
} else if (lsinblock[tid] > 0 && lsinblock[tid + i] < 0) {
lsinblock[tid] = lsinblock[tid + i];
lsinblock[tid + i] = (float)(tid + i);
} else if (lsinblock[tid] < 0 && lsinblock[tid + i] > 0) {
lsinblock[tid + i] = (float)tid;
}
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
D_Score[bid] = lsinblock[0];
t = 0;
for (i = 0; i < 7 && t < 128 && t >= 0; i++) {
t = (int)lsinblock[(int)powf(2.0, i) + t];
}
lsinblock[0] = (float)t;
}
__syncthreads();
if (tid == (int)lsinblock[0]) {
for (i = 0; i < 4; i++) {
D_resP[bid * 4 + i] = bestparent[i];
}
}
}
__device__ void Dincr(int *bit, int n) {
while (n <= NODE_N) {
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
n++;
} else {
break;
}
}
return;
}
__device__ void DincrS(int *bit, int n) {
bit[n]++;
if (bit[n] >= STATE_N) {
bit[n] = 0;
Dincr(bit, n + 1);
}
return;
}
__device__ bool D_getState(int parN, int *sta, int time) {
int i, j = 1;
for (i = 0; i < parN; i++) {
j *= STATE_N;
}
j--;
if (time > j)
return false;
if (time >= 1)
DincrS(sta, 0);
return true;
}
__device__ void D_findComb(int *comb, int l, int n) {
const int len = 4;
if (l == 0) {
for (int i = 0; i < len; i++)
comb[i] = -1;
return;
}
int sum = 0;
int k = 1;
while (sum < l)
sum += D_C(n, k++);
l -= sum - D_C(n, --k);
int low = 0;
int pos = 0;
while (k > 1) {
sum = 0;
int s = 1;
while (sum < l)
sum += D_C(n - s++, k - 1);
l -= sum - D_C(n - (--s), --k);
low += s;
comb[pos++] = low;
n -= s;
}
comb[pos] = low + l;
for (int i = pos + 1; i < 4; i++)
comb[i] = -1;
}
__device__ int D_findindex(int *arr,
int size) { // reminder: arr[0] has to be 0 && size
// == array size-1 && index start from 0
int i, j, index = 0;
for (i = 1; i < size; i++) {
index += D_C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += D_C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
__device__ int D_C(int n, int a) {
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
#endif
|
230e13cbcf3b902c0407c180da0a8d29afe969e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../physics.cu"
#include "genetics.cu"
// Includes for cuRAND library to access and use hiprandState_t to be used in genetic algorithm
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "../../headers/output.h" // for calling output methods
// Kernal caller to manage memory and values needed before calling it
// Input: h_pool - pointer to individual array that holds the individual parameters needing to be computed with
// h_constants - pointer to options struct that contains the constants needed related to the program
// Output: h_pool may contain individuals with valid solutions to hitting the target
__host__ void callGPU(individual * h_pool, options * h_constants) {
// Get how many threads and blocks to use
int numThreadsUsed = h_constants->num_threads_per;
// Holds how many blocks to use for the kernal to cover the entire pool, assuming that pop_size is equal to num_blocks * numThreads
int numBlocksUsed = h_constants->num_blocks;
// Store the number of bytes the pool array is and use when managing memory for CUDA
size_t poolMemSize = sizeof(individual)*h_constants->pop_size;
// Allocate and copy over memory into the device
individual * d_pool;
hipMalloc(&d_pool, poolMemSize);
hipMemcpy(d_pool, h_pool, poolMemSize, hipMemcpyHostToDevice);
individual * d_offset_temp;
hipMalloc(&d_offset_temp, poolMemSize);
// Allocate memory for constants object
options * d_constants;
hipMalloc(&d_constants, sizeof(options));
hipMemcpy(d_constants, h_constants, sizeof(options), hipMemcpyHostToDevice);
// Allocate hiprandState_t to use for random number generation in CUDA
hiprandState_t *d_state;
hipMalloc(&d_state, sizeof(hiprandState_t)*h_constants->pop_size);
// Allocate memory for integer object for determining if solution is found in a thread
int * d_foundSolution;
int * h_foundSolution = new int(0);
hipMalloc(&d_foundSolution, sizeof(int));
hipMemcpy(d_foundSolution, h_foundSolution, sizeof(int), hipMemcpyHostToDevice);
// Initialize the random number generator into state
hipLaunchKernelGGL(( initializeRandom), dim3(numBlocksUsed), dim3(numThreadsUsed), 0, 0, d_pool, d_state, d_constants, d_foundSolution);
hipDeviceSynchronize();
// At this point all initialization is finished
int gen_count = 0;
do {
// Perform the algorithm
hipLaunchKernelGGL(( simulateGPU), dim3(numBlocksUsed), dim3(numThreadsUsed), 0, 0, d_constants, d_pool, d_foundSolution);
hipDeviceSynchronize();
// At this point all the simulations are finished including setting costs and found solution determined
// Copy foundSolution to see if a solution was reached
hipMemcpy(h_foundSolution, d_foundSolution, sizeof(int), hipMemcpyDeviceToHost);
if (*h_foundSolution == 0) { // No solution found yet, create new generation
hipLaunchKernelGGL(( geneticAlgorithm), dim3(numBlocksUsed), dim3(numThreadsUsed), 0, 0, d_pool, d_constants, d_state);
hipDeviceSynchronize();
// Offset 16 to help diversify the pool, done by calling offsetCopy twice (offset 8 each) to ensure no race condition across all threads
hipLaunchKernelGGL(( offsetCopy), dim3(numBlocksUsed), dim3(numThreadsUsed), 0, 0, d_pool, d_offset_temp, d_constants);
hipDeviceSynchronize();
hipLaunchKernelGGL(( offsetCopy), dim3(numBlocksUsed), dim3(numThreadsUsed), 0, 0, d_offset_temp, d_pool, d_constants);
hipDeviceSynchronize();
}
gen_count++; // Increment gen_count for next generation
} while (*h_foundSolution == 0 && gen_count < h_constants->max_generations); // continue loop until solution found or max generations reached
// End of algorithm
// Copy results of the pool into host memory
hipMemcpy(h_pool, d_pool, poolMemSize, hipMemcpyDeviceToHost);
// Free resources from device before ending function
hipFree(d_constants);
hipFree(d_pool);
hipFree(d_offset_temp);
hipFree(d_state);
hipFree(d_foundSolution);
// Deallocate host memory
delete h_foundSolution;
}
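// Minimal host-side usage sketch for callGPU (illustrative only: the literal
// option values and the initializePool()/reportPool() helpers below are
// assumptions for the example, not functions provided by this project):
//
// options h_constants;
// h_constants.num_blocks = 32;
// h_constants.num_threads_per = 32;
// h_constants.pop_size = h_constants.num_blocks * h_constants.num_threads_per;
// h_constants.max_generations = 1000;
// individual * h_pool = new individual[h_constants.pop_size];
// initializePool(h_pool, &h_constants);  // hypothetical host-side seeding
// callGPU(h_pool, &h_constants);         // run the GA until a solution or max_generations
// reportPool(h_pool, &h_constants);      // hypothetical inspection / output call
// delete [] h_pool;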
| 230e13cbcf3b902c0407c180da0a8d29afe969e3.cu | #include "../physics.cu"
#include "genetics.cu"
// Includes for cuRAND library to access and use curandState to be used in genetic algorithm
#include <curand.h>
#include <curand_kernel.h>
#include "../../headers/output.h" // for calling output methods
// Kernel caller to manage memory and values needed before calling it
// Input: h_pool - pointer to individual array that holds the individual parameters needing to be computed with
// h_constants - pointer to options struct that contains the constants needed related to the program
// Output: h_pool may contain individuals with valid solutions to hitting the target
__host__ void callGPU(individual * h_pool, options * h_constants) {
// Get how many threads and blocks to use
int numThreadsUsed = h_constants->num_threads_per;
// Holds how many blocks to use for the kernel to cover the entire pool, assuming that pop_size is equal to num_blocks * numThreads
int numBlocksUsed = h_constants->num_blocks;
// Store the number of bytes the pool array is and use when managing memory for CUDA
size_t poolMemSize = sizeof(individual)*h_constants->pop_size;
// Allocate and copy over memory into the device
individual * d_pool;
cudaMalloc(&d_pool, poolMemSize);
cudaMemcpy(d_pool, h_pool, poolMemSize, cudaMemcpyHostToDevice);
individual * d_offset_temp;
cudaMalloc(&d_offset_temp, poolMemSize);
// Allocate memory for constants object
options * d_constants;
cudaMalloc(&d_constants, sizeof(options));
cudaMemcpy(d_constants, h_constants, sizeof(options), cudaMemcpyHostToDevice);
// Allocate curandState to use for random number generation in CUDA
curandState_t *d_state;
cudaMalloc(&d_state, sizeof(curandState_t)*h_constants->pop_size);
// Allocate memory for integer object for determining if solution is found in a thread
int * d_foundSolution;
int * h_foundSolution = new int(0);
cudaMalloc(&d_foundSolution, sizeof(int));
cudaMemcpy(d_foundSolution, h_foundSolution, sizeof(int), cudaMemcpyHostToDevice);
// Initialize the random number generator into state
initializeRandom<<<numBlocksUsed, numThreadsUsed>>>(d_pool, d_state, d_constants, d_foundSolution);
cudaDeviceSynchronize();
// At this point all initialization is finished
int gen_count = 0;
do {
// Perform the algorithm
simulateGPU<<<numBlocksUsed, numThreadsUsed>>>(d_constants, d_pool, d_foundSolution);
cudaDeviceSynchronize();
// At this point all the simulations are finished including setting costs and found solution determined
// Copy foundSolution to see if a solution was reached
cudaMemcpy(h_foundSolution, d_foundSolution, sizeof(int), cudaMemcpyDeviceToHost);
if (*h_foundSolution == 0) { // No solution found yet, create new generation
geneticAlgorithm<<<numBlocksUsed, numThreadsUsed>>>(d_pool, d_constants, d_state);
cudaDeviceSynchronize();
// Offset 16 to help diversify the pool, done by calling offsetCopy twice (offset 8 each) to ensure no race condition across all threads
offsetCopy<<<numBlocksUsed, numThreadsUsed>>>(d_pool, d_offset_temp, d_constants);
cudaDeviceSynchronize();
offsetCopy<<<numBlocksUsed, numThreadsUsed>>>(d_offset_temp, d_pool, d_constants);
cudaDeviceSynchronize();
}
gen_count++; // Increment gen_count for next generation
} while (*h_foundSolution == 0 && gen_count < h_constants->max_generations); // continue loop until solution found or max generations reached
// End of algorithm
// Copy results of the pool into host memory
cudaMemcpy(h_pool, d_pool, poolMemSize, cudaMemcpyDeviceToHost);
// Free resources from device before ending function
cudaFree(d_constants);
cudaFree(d_pool);
cudaFree(d_offset_temp);
cudaFree(d_state);
cudaFree(d_foundSolution);
// Deallocate host memory
delete h_foundSolution;
}
|
1b4a8c9220baf4b5f56f687c7dc388edc57a2566.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define blockSize 128
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
int *dev_data;
__global__ void upSweep(int *data, int n, int d) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int stride = powf(2, d + 1);
if (index >= n || index % stride != 0) {
return;
}
int index2 = index + powf(2, d) - 1;
data[index + stride - 1] += data[index2];
}
__global__ void downSweep(int *data, int n, int d) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int stride = powf(2, d + 1);
if (index >= n || index % stride != 0) {
return;
}
int index2 = index + powf(2, d) - 1;
int index3 = index + powf(2, d + 1) - 1;
int t = data[index2];
data[index2] = data[index3];
data[index3] += t;
}
__global__ void copyBuffer(const int *source, int *dest, int n) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
dest[index] = source[index];
}
__global__ void kern0LastElement(int *data, int n) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index != n - 1) {
return;
}
data[index] = 0;
}
__global__ void kernReduction(int *data, int n, int d) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int stride = powf(2, d + 1);
if (index >= n || index % stride != 0) {
return;
}
int index2 = index + powf(2, d) - 1;
data[index + stride - 1] += data[index2];
}
void printArray(const int *array, int n) {
printf("[");
for (int i = 0; i < n; i++) {
printf("%d, ", array[i]);
}
printf("]\n");
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int n2 = pow(2, ceil(log2(n)));
dim3 fullBlocksPerGrid((n2 + blockSize - 1) / blockSize);
hipMalloc((void**)&dev_data, n2 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_data failed!");
hipMemcpy(dev_data, idata, sizeof(int) * n, hipMemcpyHostToDevice);
timer().startGpuTimer();
for (int d = 0; d < log2(n2); d++) {
upSweep << <fullBlocksPerGrid, blockSize >> > (dev_data, n2, d);
checkCUDAErrorWithLine("Up sweep failed!");
}
kern0LastElement << <fullBlocksPerGrid, blockSize >> > (dev_data, n2);
for (int d = log2(n2) - 1; d >= 0; d--) {
downSweep << <fullBlocksPerGrid, blockSize >> > (dev_data, n2, d);
checkCUDAErrorWithLine("Down sweep failed!");
}
timer().endGpuTimer();
hipMemcpy(odata, dev_data, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(dev_data);
}
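// Worked example of the up-sweep / down-sweep (Blelloch) scan above for n = 4
// and input {3, 1, 7, 0} (values chosen only for illustration):
//   up-sweep d=0  : {3, 4, 7, 7}   (pairwise partial sums)
//   up-sweep d=1  : {3, 4, 7, 11}  (last element holds the total)
//   zero last     : {3, 4, 7, 0}
//   down-sweep d=1: {3, 0, 7, 4}
//   down-sweep d=0: {0, 3, 4, 11} = exclusive prefix sum of the input
// Inputs are padded to n2 = 2^ceil(log2(n)); the padded tail of dev_data is
// uninitialized, but an exclusive-scan output at position i only sums inputs
// 0..i-1, so the first n results copied back into odata are unaffected.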
int *dev_bools;
int *dev_idata;
int *dev_odata;
int *dev_scanned;
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int n2 = pow(2, ceil(log2(n)));
dim3 fullBlocksPerGrid((n2 + blockSize - 1) / blockSize);
hipMalloc((void**)&dev_bools, n2 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_bools failed!");
hipMalloc((void**)&dev_idata, n2 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_odata, n2 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
hipMalloc((void**)&dev_scanned, n2 * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_scanned failed!");
// zero the padded tail so uninitialized entries beyond n are not counted as kept elements
hipMemset(dev_idata, 0, sizeof(int) * n2);
hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
timer().startGpuTimer();
Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n2, dev_bools, dev_idata);
hipMemcpy(dev_scanned, dev_bools, sizeof(int) * n2, hipMemcpyDeviceToDevice);
for (int d = 0; d < log2(n2); d++) {
upSweep << <fullBlocksPerGrid, blockSize >> > (dev_scanned, n2, d);
checkCUDAErrorWithLine("Up sweep failed!");
}
kern0LastElement << <fullBlocksPerGrid, blockSize >> > (dev_scanned, n2);
for (int d = log2(n2) - 1; d >= 0; d--) {
downSweep << <fullBlocksPerGrid, blockSize >> > (dev_scanned, n2, d);
checkCUDAErrorWithLine("Down sweep failed!");
}
Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n2, dev_odata, dev_idata, dev_bools, dev_scanned);
for (int d = 0; d < log2(n2); d++) {
kernReduction << <fullBlocksPerGrid, blockSize >> > (dev_bools, n2, d);
checkCUDAErrorWithLine("Reduction failed!");
}
timer().endGpuTimer();
int *summedBools = new int[n2];
hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
hipMemcpy(summedBools, dev_bools, sizeof(int) * n2, hipMemcpyDeviceToHost);
int toReturn = summedBools[n2 - 1];
hipFree(dev_bools);
hipFree(dev_idata);
hipFree(dev_odata);
hipFree(dev_scanned);
delete[] summedBools;
return toReturn;
}
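// Worked example of the compaction pipeline above (illustrative values, with
// the padded tail of dev_idata zeroed as done before the timed region):
//   idata = {3, 0, 5, 0, 2}
//   kernMapToBoolean -> bools   = {1, 0, 1, 0, 1, 0, 0, 0}   (n2 = 8)
//   exclusive scan   -> scanned = {0, 1, 1, 2, 2, 3, 3, 3}
//   kernScatter      -> odata   = {3, 5, 2}  (kept element i lands at scanned[i])
// The in-place up-sweep over bools then leaves the total count, 3, in
// bools[n2 - 1], which is the value compact() returns.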
}
}
| 1b4a8c9220baf4b5f56f687c7dc388edc57a2566.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define blockSize 128
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
int *dev_data;
__global__ void upSweep(int *data, int n, int d) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int stride = powf(2, d + 1);
if (index >= n || index % stride != 0) {
return;
}
int index2 = index + powf(2, d) - 1;
data[index + stride - 1] += data[index2];
}
__global__ void downSweep(int *data, int n, int d) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int stride = powf(2, d + 1);
if (index >= n || index % stride != 0) {
return;
}
int index2 = index + powf(2, d) - 1;
int index3 = index + powf(2, d + 1) - 1;
int t = data[index2];
data[index2] = data[index3];
data[index3] += t;
}
__global__ void copyBuffer(const int *source, int *dest, int n) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
dest[index] = source[index];
}
__global__ void kern0LastElement(int *data, int n) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index != n - 1) {
return;
}
data[index] = 0;
}
__global__ void kernReduction(int *data, int n, int d) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
int stride = powf(2, d + 1);
if (index >= n || index % stride != 0) {
return;
}
int index2 = index + powf(2, d) - 1;
data[index + stride - 1] += data[index2];
}
void printArray(const int *array, int n) {
printf("[");
for (int i = 0; i < n; i++) {
printf("%d, ", array[i]);
}
printf("]\n");
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int n2 = pow(2, ceil(log2(n)));
dim3 fullBlocksPerGrid((n2 + blockSize - 1) / blockSize);
cudaMalloc((void**)&dev_data, n2 * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_data failed!");
cudaMemcpy(dev_data, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
timer().startGpuTimer();
for (int d = 0; d < log2(n2); d++) {
upSweep << <fullBlocksPerGrid, blockSize >> > (dev_data, n2, d);
checkCUDAErrorWithLine("Up sweep failed!");
}
kern0LastElement << <fullBlocksPerGrid, blockSize >> > (dev_data, n2);
for (int d = log2(n2) - 1; d >= 0; d--) {
downSweep << <fullBlocksPerGrid, blockSize >> > (dev_data, n2, d);
checkCUDAErrorWithLine("Down sweep failed!");
}
timer().endGpuTimer();
cudaMemcpy(odata, dev_data, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(dev_data);
}
int *dev_bools;
int *dev_idata;
int *dev_odata;
int *dev_scanned;
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int n2 = pow(2, ceil(log2(n)));
dim3 fullBlocksPerGrid((n2 + blockSize - 1) / blockSize);
cudaMalloc((void**)&dev_bools, n2 * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_bools failed!");
cudaMalloc((void**)&dev_idata, n2 * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_odata, n2 * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
cudaMalloc((void**)&dev_scanned, n2 * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_scanned failed!");
// zero the padded tail so uninitialized entries beyond n are not counted as kept elements
cudaMemset(dev_idata, 0, sizeof(int) * n2);
cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
timer().startGpuTimer();
Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n2, dev_bools, dev_idata);
cudaMemcpy(dev_scanned, dev_bools, sizeof(int) * n2, cudaMemcpyDeviceToDevice);
for (int d = 0; d < log2(n2); d++) {
upSweep << <fullBlocksPerGrid, blockSize >> > (dev_scanned, n2, d);
checkCUDAErrorWithLine("Up sweep failed!");
}
kern0LastElement << <fullBlocksPerGrid, blockSize >> > (dev_scanned, n2);
for (int d = log2(n2) - 1; d >= 0; d--) {
downSweep << <fullBlocksPerGrid, blockSize >> > (dev_scanned, n2, d);
checkCUDAErrorWithLine("Down sweep failed!");
}
Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n2, dev_odata, dev_idata, dev_bools, dev_scanned);
for (int d = 0; d < log2(n2); d++) {
kernReduction << <fullBlocksPerGrid, blockSize >> > (dev_bools, n2, d);
checkCUDAErrorWithLine("Reduction failed!");
}
timer().endGpuTimer();
int *summedBools = new int[n2];
cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaMemcpy(summedBools, dev_bools, sizeof(int) * n2, cudaMemcpyDeviceToHost);
int toReturn = summedBools[n2 - 1];
cudaFree(dev_bools);
cudaFree(dev_idata);
cudaFree(dev_odata);
cudaFree(dev_scanned);
delete[] summedBools;
return toReturn;
}
}
}
|
86ba9196140d3df0801de183103744ee1f6a1c4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2007 A. Arnold and J. A. van Meel, FOM institute
AMOLF, Amsterdam; all rights reserved unless otherwise stated.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
In addition to the regulations of the GNU General Public License,
publications and communications based in parts on this program or on
parts of this program are required to cite the article
"Harvesting graphics power for MD simulations"
by J.A. van Meel, A. Arnold, D. Frenkel, S. F. Portegies Zwart and
R. G. Belleman, Molecular Simulation, Vol. 34, p. 259 (2007).
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
*/
#include "gpu.hpp"
#include "random.hpp"
/************************************************
* GPU kernels
************************************************/
/************************************************
* RNG iteration
************************************************/
/*
__device__
static uint2 RNG_rand48_iterate_single(uint2 Xn, uint2 A, uint2 C)
{
// results and Xn are 2x 24bit to handle overflows optimally, i.e.
// in one operation.
// the multiplication commands however give the low and hi 32 bit,
// which have to be converted as follows:
// 48bit in bytes = ABCD EF (space marks 32bit boundary)
// R0 = ABC
// R1 = D EF
unsigned int R0, R1;
// low 24-bit multiplication
const unsigned int lo00 = __umul24(Xn.x, A.x);
const unsigned int hi00 = __umulhi(Xn.x, A.x);
// 24bit distribution of 32bit multiplication results
R0 = (lo00 & 0xFFFFFF);
R1 = (lo00 >> 24) | (hi00 << 8);
R0 += C.x; R1 += C.y;
// transfer overflows
R1 += (R0 >> 24);
R0 &= 0xFFFFFF;
// cross-terms, low/hi 24-bit multiplication
R1 += __umul24(Xn.y, A.x);
R1 += __umul24(Xn.x, A.y);
R1 &= 0xFFFFFF;
return make_uint2(R0, R1);
}
__device__ void BoxMuller(float& u1, float& u2)
{
u1 = max(u1, 1e-20);
u2 = max(u2, 1e-20);
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * M_PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__
static void RNG_rand48_get_float(uint2 *state, float *res, int num_blocks, uint2 A, uint2 C)
{
const int nThreads = blockDim.x*gridDim.x;
const float rand_max = 2147483647.0f;
float t0, t1;
// load the current state of the RNG into a register
int nOutIdx = threadIdx.x + blockIdx.x*blockDim.x;
uint2 lstate = state[nOutIdx];
int i;
for (i = 0; i < num_blocks; i+=2) {
// Assumes positive even num_blocks
// get upper 31 (!) bits of the 2x 24bits
t0 = (( lstate.x >> 17 ) | ( lstate.y << 7)) / rand_max;
// this actually iterates the RNG
lstate = RNG_rand48_iterate_single(lstate, A, C);
// get upper 31 (!) bits of the 2x 24bits
t1 = (( lstate.x >> 17 ) | ( lstate.y << 7)) / rand_max;
// this actually iterates the RNG
lstate = RNG_rand48_iterate_single(lstate, A, C);
BoxMuller(t0,t1);
res[nOutIdx] = t0;
nOutIdx += nThreads;
res[nOutIdx] = t1;
nOutIdx += nThreads;
}
nOutIdx = threadIdx.x + blockIdx.x*blockDim.x;
state[nOutIdx] = lstate;
}
*/
/************************************************
* RNG_rand48 implementation
************************************************/
void
RNG_rand48::init(const int& seed,
const int& nThreads,
unsigned int& A0_out,
unsigned int& A1_out,
unsigned int& C0_out,
unsigned int& C1_out) {
// setup execution grid to get max performance
// threadsX = num_threads_per_block;
// blocksX = num_blocks;
//const int nThreads = num_threads;//threadsX*blocksX;
uint2* seeds = new uint2[ nThreads ];
cutilSafeCall( hipMalloc( (void**) &state, sizeof(uint2)*nThreads ) );
// calculate strided iteration constants
unsigned long long A, C;
A = 1LL; C = 0LL;
for (unsigned int i = 0; i < nThreads; ++i) {
C += A*c;
A *= a;
}
A0_out = A & 0xFFFFFFLL;
A1_out = (A >> 24) & 0xFFFFFFLL;
C0_out = C & 0xFFFFFFLL;
C1_out = (C >> 24) & 0xFFFFFFLL;
// prepare first nThreads random numbers from seed
unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E;
for (unsigned int i = 0; i < nThreads; ++i) {
x = a*x + c;
seeds[i].x = x & 0xFFFFFFLL;
seeds[i].y = (x >> 24) & 0xFFFFFFLL;
}
cutilSafeCall(hipMemcpy((void*)state, (void*)seeds, sizeof(uint2)*nThreads, hipMemcpyHostToDevice));
delete[] seeds;
}
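// The loop above builds the N-step "skip-ahead" constants of the 48-bit LCG
// x_{k+1} = a*x_k + c (mod 2^48): after N = nThreads passes,
//   A = a^N   and   C = c * (1 + a + a^2 + ... + a^(N-1))
// (accumulated in 64-bit arithmetic and truncated to 2 x 24 bits on output),
// so every thread can advance its private substream with x_{k+N} = A*x_k + C
// while starting from the serially generated seed states prepared just above.
// Illustrative host-side sanity check, assuming the usual rand48 constants
// a = 0x5DEECE66D and c = 0xB: iterating x0 through the serial recurrence N
// times must agree with ((A*x0 + C) & 0xFFFFFFFFFFFFull).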
uint2* RNG_rand48::get_state_ptr() { return state; }
void
RNG_rand48::cleanup() {
cutilSafeCall(hipFree((void*) state));
}
void
RNG_rand48::generate(int n) {
/*
const int nThreads = threadsX*blocksX;
int num_blocks = (n + nThreads-1)/nThreads;
if (res == 0) {
cutilSafeCall(hipMalloc( (void**) &res, sizeof(float)*nThreads*num_blocks));
}
dim3 grid( blocksX, 1, 1);
dim3 threads( threadsX, 1, 1);
uint2 A, C;
A.x = A0; A.y = A1;
C.x = C0; C.y = C1;
// call GPU kernel
RNG_rand48_get_float<<< grid, threads >>>((uint2 *)state, (float*)res, num_blocks, A, C);
*/
}
void
RNG_rand48::get(float *r, int n) {
cutilSafeCall(hipMemcpy( r, res, sizeof(float)*n, hipMemcpyDeviceToHost ) );
}
| 86ba9196140d3df0801de183103744ee1f6a1c4d.cu | /*
Copyright (c) 2007 A. Arnold and J. A. van Meel, FOM institute
AMOLF, Amsterdam; all rights reserved unless otherwise stated.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
In addition to the regulations of the GNU General Public License,
publications and communications based in parts on this program or on
parts of this program are required to cite the article
"Harvesting graphics power for MD simulations"
by J.A. van Meel, A. Arnold, D. Frenkel, S. F. Portegies Zwart and
R. G. Belleman, Molecular Simulation, Vol. 34, p. 259 (2007).
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
*/
#include "gpu.hpp"
#include "random.hpp"
/************************************************
* GPU kernels
************************************************/
/************************************************
* RNG iteration
************************************************/
/*
__device__
static uint2 RNG_rand48_iterate_single(uint2 Xn, uint2 A, uint2 C)
{
// results and Xn are 2x 24bit to handle overflows optimally, i.e.
// in one operation.
// the multiplication commands however give the low and hi 32 bit,
// which have to be converted as follows:
// 48bit in bytes = ABCD EF (space marks 32bit boundary)
// R0 = ABC
// R1 = D EF
unsigned int R0, R1;
// low 24-bit multiplication
const unsigned int lo00 = __umul24(Xn.x, A.x);
const unsigned int hi00 = __umulhi(Xn.x, A.x);
// 24bit distribution of 32bit multiplication results
R0 = (lo00 & 0xFFFFFF);
R1 = (lo00 >> 24) | (hi00 << 8);
R0 += C.x; R1 += C.y;
// transfer overflows
R1 += (R0 >> 24);
R0 &= 0xFFFFFF;
// cross-terms, low/hi 24-bit multiplication
R1 += __umul24(Xn.y, A.x);
R1 += __umul24(Xn.x, A.y);
R1 &= 0xFFFFFF;
return make_uint2(R0, R1);
}
__device__ void BoxMuller(float& u1, float& u2)
{
u1 = max(u1, 1e-20);
u2 = max(u2, 1e-20);
float r = sqrtf(-2.0f * logf(u1));
float phi = 2 * M_PI * u2;
u1 = r * __cosf(phi);
u2 = r * __sinf(phi);
}
__global__
static void RNG_rand48_get_float(uint2 *state, float *res, int num_blocks, uint2 A, uint2 C)
{
const int nThreads = blockDim.x*gridDim.x;
const float rand_max = 2147483647.0f;
float t0, t1;
// load the current state of the RNG into a register
int nOutIdx = threadIdx.x + blockIdx.x*blockDim.x;
uint2 lstate = state[nOutIdx];
int i;
for (i = 0; i < num_blocks; i+=2) {
// Assumes positive even num_blocks
// get upper 31 (!) bits of the 2x 24bits
t0 = (( lstate.x >> 17 ) | ( lstate.y << 7)) / rand_max;
// this actually iterates the RNG
lstate = RNG_rand48_iterate_single(lstate, A, C);
// get upper 31 (!) bits of the 2x 24bits
t1 = (( lstate.x >> 17 ) | ( lstate.y << 7)) / rand_max;
// this actually iterates the RNG
lstate = RNG_rand48_iterate_single(lstate, A, C);
BoxMuller(t0,t1);
res[nOutIdx] = t0;
nOutIdx += nThreads;
res[nOutIdx] = t1;
nOutIdx += nThreads;
}
nOutIdx = threadIdx.x + blockIdx.x*blockDim.x;
state[nOutIdx] = lstate;
}
*/
/************************************************
* RNG_rand48 implementation
************************************************/
void
RNG_rand48::init(const int& seed,
const int& nThreads,
unsigned int& A0_out,
unsigned int& A1_out,
unsigned int& C0_out,
unsigned int& C1_out) {
// setup execution grid to get max performance
// threadsX = num_threads_per_block;
// blocksX = num_blocks;
//const int nThreads = num_threads;//threadsX*blocksX;
uint2* seeds = new uint2[ nThreads ];
cutilSafeCall( cudaMalloc( (void**) &state, sizeof(uint2)*nThreads ) );
// calculate strided iteration constants
unsigned long long A, C;
A = 1LL; C = 0LL;
for (unsigned int i = 0; i < nThreads; ++i) {
C += A*c;
A *= a;
}
A0_out = A & 0xFFFFFFLL;
A1_out = (A >> 24) & 0xFFFFFFLL;
C0_out = C & 0xFFFFFFLL;
C1_out = (C >> 24) & 0xFFFFFFLL;
// prepare first nThreads random numbers from seed
unsigned long long x = (((unsigned long long)seed) << 16) | 0x330E;
for (unsigned int i = 0; i < nThreads; ++i) {
x = a*x + c;
seeds[i].x = x & 0xFFFFFFLL;
seeds[i].y = (x >> 24) & 0xFFFFFFLL;
}
cutilSafeCall(cudaMemcpy((void*)state, (void*)seeds, sizeof(uint2)*nThreads, cudaMemcpyHostToDevice));
delete[] seeds;
}
uint2* RNG_rand48::get_state_ptr() { return state; }
void
RNG_rand48::cleanup() {
cutilSafeCall(cudaFree((void*) state));
}
void
RNG_rand48::generate(int n) {
/*
const int nThreads = threadsX*blocksX;
int num_blocks = (n + nThreads-1)/nThreads;
if (res == 0) {
cutilSafeCall(cudaMalloc( (void**) &res, sizeof(float)*nThreads*num_blocks));
}
dim3 grid( blocksX, 1, 1);
dim3 threads( threadsX, 1, 1);
uint2 A, C;
A.x = A0; A.y = A1;
C.x = C0; C.y = C1;
// call GPU kernel
RNG_rand48_get_float<<< grid, threads >>>((uint2 *)state, (float*)res, num_blocks, A, C);
*/
}
void
RNG_rand48::get(float *r, int n) {
cutilSafeCall(cudaMemcpy( r, res, sizeof(float)*n, cudaMemcpyDeviceToHost ) );
}
|
49b14bfc940fb157c60e55001b4048a911c341e1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/batch_conv_bias/int8/kimpl/batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4.cuinl"
template void megdnn::cuda::batch_conv_bias::do_batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
int* d_workspace,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| 49b14bfc940fb157c60e55001b4048a911c341e1.cu | /**
* \file dnn/src/cuda/batch_conv_bias/int8/kimpl/batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_batch_cuda_conv_bias_kern_impls.py
#include "../batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4.cuinl"
template void megdnn::cuda::batch_conv_bias::do_batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
int* d_workspace,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::BatchConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
f85e70da0067326b1f5fcb2b1ddd9585abef7032.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header_hip.cuh"
#include "gpu_memory.cuh"
extern __shared__ double y_shared[];
__device__ void interpolate_gamma( cvklu_data *rate_data, double T, double *gamma, double *dgamma_dT )
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
log_temp_out = log(T);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
*gamma = rate_data->g_gammaH2_1[bin_id] + Tdef * (rate_data->g_gammaH2_1[bin_id+1] - rate_data->g_gammaH2_1[bin_id]);
*dgamma_dT = rate_data->g_dgammaH2_1_dT[bin_id] + Tdef * (rate_data->g_dgammaH2_1_dT[bin_id+1] - rate_data->g_dgammaH2_1_dT[bin_id]);
}
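// The lookup above is piecewise-linear interpolation on a table that is
// uniform in ln(T): bin_id = idbin * (ln T - ln T_min), the bin edges are
// t1 = ln T_min + bin_id * dbin and t2 = t1 + dbin, and the blend weight is
// Tdef = (ln T - t1) / (t2 - t1), giving e.g.
//   gamma(T) ~= g_gammaH2_1[bin] + Tdef * (g_gammaH2_1[bin+1] - g_gammaH2_1[bin]).
// The same pattern is reused for every reaction and cooling rate below, with
// out-of-range temperatures clamped to the first or last bin.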
__device__ void evaluate_temperature( double* T, double* dTs_ge, double *y, const double mdensity, cvklu_data *rate_data )
{
// iterate temperature to convergence
double t, tnew, tdiff;
double dge, dge_dT;
double gammaH2, dgammaH2_dT, _gammaH2_m1;
int count = 0;
int MAX_ITERATION = 100;
double gamma = 5./3.;
double _gamma_m1 = 1.0 / (gamma - 1.0);
double kb = 1.3806504e-16; // Boltzmann constant [erg/K]
// prepare t, tnew for the newton's iteration;
t = *T;
if (t != t) t = 1000.0;
tnew = 1.1*t;
tdiff = tnew - t;
while ( tdiff/ tnew > 0.001 ){
// We do Newton's Iteration to calculate the temperature
// Since gammaH2 is dependent on the temperature too!
interpolate_gamma( rate_data, t, &gammaH2, &dgammaH2_dT );
_gammaH2_m1 = 1.0 / (gammaH2 - 1.0);
dge_dT = t*kb*(-y[INDEX(0)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT - y[INDEX(1)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT)/(mdensity)
+ kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1 + y[INDEX(4)]*_gamma_m1
+ y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity);
dge = t*kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1
+ y[INDEX(4)]*_gamma_m1 + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity) - y[INDEX(9)];
//This is the change in ge for each iteration
tnew = t - dge/dge_dT;
count += 1;
tdiff = fabs(t - tnew);
t = tnew;
if (count > MAX_ITERATION){
printf("T[tid = %d] failed to converge (iteration: %d); at T = %0.3g \n", T_ID, count, tnew );
}
if ( t!= t && T_ID == 0){
printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", T_ID, t, count, y[INDEX(9)], gammaH2);
t = 1000.0;
for (int i = 0; i < 10; i++){
printf("y[S_INDEX(%d)] = %0.5g \n", i, y[S_INDEX(i)]);
}
break;
}
}
// update the temperature;
*T = t;
*dTs_ge = 1.0 / dge_dT;
// printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", tid, t, count, y[INDEX(9)], gammaH2);
}
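// The Newton iteration above, written out: the specific internal energy of
// this network is
//   ge(T) = kB * T * sum_s y_s / (gamma_s - 1) / mdensity,
// with gamma = 5/3 for the atomic/ionic species and the tabulated, T-dependent
// gammaH2 for the molecular species. The residual dge = ge(T) - y[INDEX(9)]
// (the input gas energy) and its derivative dge_dT (which keeps the
// dgammaH2/dT term) give the update
//   T_new = T - dge / dge_dT,
// repeated until the relative change drops below 1e-3 (past MAX_ITERATION only
// a warning is printed). dTs_ge = 1 / dge_dT is stored for later use, e.g. for
// chain-rule terms that couple temperature back to the energy equation.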
__device__ void interpolate_reaction_rates( double *reaction_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, invTs, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
invTs = 1.0 / temp_out;
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
reaction_rates_out[INDEX( 0)] = rate_data->r_k01[bin_id] + Tdef * (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]);
reaction_rates_out[INDEX( 1)] = rate_data->r_k02[bin_id] + Tdef * (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]);
reaction_rates_out[INDEX( 2)] = rate_data->r_k03[bin_id] + Tdef * (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]);
reaction_rates_out[INDEX( 3)] = rate_data->r_k04[bin_id] + Tdef * (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]);
reaction_rates_out[INDEX( 4)] = rate_data->r_k05[bin_id] + Tdef * (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]);
reaction_rates_out[INDEX( 5)] = rate_data->r_k06[bin_id] + Tdef * (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]);
reaction_rates_out[INDEX( 6)] = rate_data->r_k07[bin_id] + Tdef * (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]);
reaction_rates_out[INDEX( 7)] = rate_data->r_k08[bin_id] + Tdef * (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]);
reaction_rates_out[INDEX( 8)] = rate_data->r_k09[bin_id] + Tdef * (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]);
reaction_rates_out[INDEX( 9)] = rate_data->r_k10[bin_id] + Tdef * (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]);
reaction_rates_out[INDEX(10)] = rate_data->r_k11[bin_id] + Tdef * (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]);
reaction_rates_out[INDEX(11)] = rate_data->r_k12[bin_id] + Tdef * (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]);
reaction_rates_out[INDEX(12)] = rate_data->r_k13[bin_id] + Tdef * (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]);
reaction_rates_out[INDEX(13)] = rate_data->r_k14[bin_id] + Tdef * (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]);
reaction_rates_out[INDEX(14)] = rate_data->r_k15[bin_id] + Tdef * (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]);
reaction_rates_out[INDEX(15)] = rate_data->r_k16[bin_id] + Tdef * (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]);
reaction_rates_out[INDEX(16)] = rate_data->r_k17[bin_id] + Tdef * (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]);
reaction_rates_out[INDEX(17)] = rate_data->r_k18[bin_id] + Tdef * (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]);
reaction_rates_out[INDEX(18)] = rate_data->r_k19[bin_id] + Tdef * (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]);
//reaction_rates_out[INDEX(19)] = rate_data->r_k20[bin_id] + Tdef * (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]);
reaction_rates_out[INDEX(20)] = rate_data->r_k21[bin_id] + Tdef * (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]);
reaction_rates_out[INDEX(21)] = rate_data->r_k22[bin_id] + Tdef * (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]);
//reaction_rates_out[INDEX(22)] = rate_data->r_k23[bin_id] + Tdef * (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]);
}
__device__ void interpolate_cooling_rates( double *cooling_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
/*
if (T_ID == 0){
printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out);
}
*/
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// rate_out is a long 1D array
// NRATE is the number of rate required by the solver network
cooling_rates_out[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id] + Tdef * (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]);
cooling_rates_out[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id] + Tdef * (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]);
cooling_rates_out[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id] + Tdef * (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]);
cooling_rates_out[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id] + Tdef * (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]);
cooling_rates_out[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id] + Tdef * (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]);
cooling_rates_out[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id] + Tdef * (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]);
cooling_rates_out[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id] + Tdef * (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]);
cooling_rates_out[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id] + Tdef * (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]);
cooling_rates_out[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id] + Tdef * (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]);
cooling_rates_out[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id] + Tdef * (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]);
cooling_rates_out[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id] + Tdef * (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]);
cooling_rates_out[INDEX(11)] = rate_data->c_brem_brem[bin_id] + Tdef * (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]);
cooling_rates_out[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]);
cooling_rates_out[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id] + Tdef * (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]);
cooling_rates_out[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]);
cooling_rates_out[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]);
cooling_rates_out[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id] + Tdef * (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]);
cooling_rates_out[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id] + Tdef * (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]);
cooling_rates_out[INDEX(18)] = rate_data->c_compton_comp_[bin_id] + Tdef * (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]);
cooling_rates_out[INDEX(19)] = rate_data->c_gammah_gammah[bin_id] + Tdef * (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]);
cooling_rates_out[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id] + Tdef * (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]);
cooling_rates_out[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id] + Tdef * (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]);
cooling_rates_out[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id] + Tdef * (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]);
cooling_rates_out[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id] + Tdef * (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]);
cooling_rates_out[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id] + Tdef * (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]);
cooling_rates_out[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
cooling_rates_out[INDEX(26)] = 1.0; //rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
}
__device__ void interpolate_dcrate_dT(double *dcr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, inv_Ts, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
inv_Ts = temp_out;
//ceHI_ceHI: 0
dcr_dT[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id];
dcr_dT[INDEX( 0)] /= dT ;
dcr_dT[INDEX( 0)] /= inv_Ts ;
//ceHeI_ceHeI: 1
dcr_dT[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id];
dcr_dT[INDEX( 1)] /= dT ;
dcr_dT[INDEX( 1)] /= inv_Ts ;
//ceHeII_ceHeII: 2
dcr_dT[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id];
dcr_dT[INDEX( 2)] /= dT ;
dcr_dT[INDEX( 2)] /= inv_Ts ;
//ciHeIS_ciHeIS: 3
dcr_dT[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id];
dcr_dT[INDEX( 3)] /= dT ;
dcr_dT[INDEX( 3)] /= inv_Ts ;
//ciHI_ciHI: 4
dcr_dT[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id];
dcr_dT[INDEX( 4)] /= dT ;
dcr_dT[INDEX( 4)] /= inv_Ts ;
//ciHeI_ciHeI: 5
dcr_dT[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id];
dcr_dT[INDEX( 5)] /= dT ;
dcr_dT[INDEX( 5)] /= inv_Ts ;
//ciHeII_ciHeII: 6
dcr_dT[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id];
dcr_dT[INDEX( 6)] /= dT ;
dcr_dT[INDEX( 6)] /= inv_Ts ;
//reHII_reHII: 7
dcr_dT[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id];
dcr_dT[INDEX( 7)] /= dT ;
dcr_dT[INDEX( 7)] /= inv_Ts ;
//reHeII1_reHeII1: 8
dcr_dT[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id];
dcr_dT[INDEX( 8)] /= dT ;
dcr_dT[INDEX( 8)] /= inv_Ts ;
//reHeII2_reHeII2: 9
dcr_dT[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id];
dcr_dT[INDEX( 9)] /= dT ;
dcr_dT[INDEX( 9)] /= inv_Ts ;
//reHeIII_reHeIII: 10
dcr_dT[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id];
dcr_dT[INDEX(10)] /= dT ;
dcr_dT[INDEX(10)] /= inv_Ts ;
//brem_brem: 11
dcr_dT[INDEX(11)] = rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id];
dcr_dT[INDEX(11)] /= dT ;
dcr_dT[INDEX(11)] /= inv_Ts ;
//gloverabel08_gaHI: 12
dcr_dT[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id];
dcr_dT[INDEX(12)] /= dT ;
dcr_dT[INDEX(12)] /= inv_Ts ;
//gloverabel08_gaH2: 13
dcr_dT[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id];
dcr_dT[INDEX(13)] /= dT ;
dcr_dT[INDEX(13)] /= inv_Ts ;
//gloverabel08_gaHe: 14
dcr_dT[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id];
dcr_dT[INDEX(14)] /= dT ;
dcr_dT[INDEX(14)] /= inv_Ts ;
//gloverabel08_gaHp: 15
dcr_dT[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id];
dcr_dT[INDEX(15)] /= dT ;
dcr_dT[INDEX(15)] /= inv_Ts ;
//gloverabel08_gael: 16
dcr_dT[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id];
dcr_dT[INDEX(16)] /= dT ;
dcr_dT[INDEX(16)] /= inv_Ts ;
//gloverabel08_h2lte: 17
dcr_dT[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id];
dcr_dT[INDEX(17)] /= dT ;
dcr_dT[INDEX(17)] /= inv_Ts ;
//compton_comp_: 18
dcr_dT[INDEX(18)] = rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id];
dcr_dT[INDEX(18)] /= dT ;
dcr_dT[INDEX(18)] /= inv_Ts ;
//gammah_gammah: 19
dcr_dT[INDEX(19)] = rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id];
dcr_dT[INDEX(19)] /= dT ;
dcr_dT[INDEX(19)] /= inv_Ts ;
//h2formation_h2mheat: 20
dcr_dT[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id];
dcr_dT[INDEX(20)] /= dT ;
dcr_dT[INDEX(20)] /= inv_Ts ;
//h2formation_h2mcool: 21
dcr_dT[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id];
dcr_dT[INDEX(21)] /= dT ;
dcr_dT[INDEX(21)] /= inv_Ts ;
//h2formation_ncrn: 22
dcr_dT[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id];
dcr_dT[INDEX(22)] /= dT ;
dcr_dT[INDEX(22)] /= inv_Ts ;
//h2formation_ncrd1: 23
dcr_dT[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id];
dcr_dT[INDEX(23)] /= dT ;
dcr_dT[INDEX(23)] /= inv_Ts ;
//h2formation_ncrd2: 24
dcr_dT[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id];
dcr_dT[INDEX(24)] /= dT ;
dcr_dT[INDEX(24)] /= inv_Ts ;
//cie_cooling_cieco: 25
dcr_dT[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id];
dcr_dT[INDEX(25)] /= dT ;
dcr_dT[INDEX(25)] /= inv_Ts ;
//cie_optical_depth_approx: 26
//dcr_dT[INDEX(26)] = rate_data->c_cie_optical_depth_approx[bin_id+1] - rate_data->c_cie_optical_depth_approx[bin_id];
//dcr_dT[INDEX(26)] /= dT ;
//dcr_dT[INDEX(26)] /= inv_Ts ;
dcr_dT[INDEX(26)] = 0.0;
}
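// Note on the derivative tables in this function and in interpolate_drrate_dT
// below: the rates are tabulated on a grid uniform in ln(T), so the finite
// difference (c[bin+1] - c[bin]) / dT approximates dk/dlnT. Dividing once more
// by the temperature applies the chain rule
//   dk/dT = (dk/dlnT) * (dlnT/dT) = (dk/dlnT) / T,
// which is why inv_Ts is assigned temp_out itself (despite its name) and then
// used as a divisor.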
__device__ void interpolate_drrate_dT(double *drr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, inv_Ts, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
inv_Ts = temp_out;
//k01: 0
drr_dT[INDEX( 0)] = rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id];
drr_dT[INDEX( 0)] /= dT ;
drr_dT[INDEX( 0)] /= inv_Ts ;
//k02: 1
drr_dT[INDEX( 1)] = rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id];
drr_dT[INDEX( 1)] /= dT ;
drr_dT[INDEX( 1)] /= inv_Ts ;
//k03: 2
drr_dT[INDEX( 2)] = rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id];
drr_dT[INDEX( 2)] /= dT ;
drr_dT[INDEX( 2)] /= inv_Ts ;
//k04: 3
drr_dT[INDEX( 3)] = rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id];
drr_dT[INDEX( 3)] /= dT ;
drr_dT[INDEX( 3)] /= inv_Ts ;
//k05: 4
drr_dT[INDEX( 4)] = rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id];
drr_dT[INDEX( 4)] /= dT ;
drr_dT[INDEX( 4)] /= inv_Ts ;
//k06: 5
drr_dT[INDEX( 5)] = rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id];
drr_dT[INDEX( 5)] /= dT ;
drr_dT[INDEX( 5)] /= inv_Ts ;
//k07: 6
drr_dT[INDEX( 6)] = rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id];
drr_dT[INDEX( 6)] /= dT ;
drr_dT[INDEX( 6)] /= inv_Ts ;
//k08: 7
drr_dT[INDEX( 7)] = rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id];
drr_dT[INDEX( 7)] /= dT ;
drr_dT[INDEX( 7)] /= inv_Ts ;
//k09: 8
drr_dT[INDEX( 8)] = rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id];
drr_dT[INDEX( 8)] /= dT ;
drr_dT[INDEX( 8)] /= inv_Ts ;
//k10: 9
drr_dT[INDEX( 9)] = rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id];
drr_dT[INDEX( 9)] /= dT ;
drr_dT[INDEX( 9)] /= inv_Ts ;
//k11: 10
drr_dT[INDEX(10)] = rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id];
drr_dT[INDEX(10)] /= dT ;
drr_dT[INDEX(10)] /= inv_Ts ;
//k12: 11
drr_dT[INDEX(11)] = rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id];
drr_dT[INDEX(11)] /= dT ;
drr_dT[INDEX(11)] /= inv_Ts ;
//k13: 12
drr_dT[INDEX(12)] = rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id];
drr_dT[INDEX(12)] /= dT ;
drr_dT[INDEX(12)] /= inv_Ts ;
//k14: 13
drr_dT[INDEX(13)] = rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id];
drr_dT[INDEX(13)] /= dT ;
drr_dT[INDEX(13)] /= inv_Ts ;
//k15: 14
drr_dT[INDEX(14)] = rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id];
drr_dT[INDEX(14)] /= dT ;
drr_dT[INDEX(14)] /= inv_Ts ;
//k16: 15
drr_dT[INDEX(15)] = rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id];
drr_dT[INDEX(15)] /= dT ;
drr_dT[INDEX(15)] /= inv_Ts ;
//k17: 16
drr_dT[INDEX(16)] = rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id];
drr_dT[INDEX(16)] /= dT ;
drr_dT[INDEX(16)] /= inv_Ts ;
//k18: 17
drr_dT[INDEX(17)] = rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id];
drr_dT[INDEX(17)] /= dT ;
drr_dT[INDEX(17)] /= inv_Ts ;
//k19: 18
drr_dT[INDEX(18)] = rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id];
drr_dT[INDEX(18)] /= dT ;
drr_dT[INDEX(18)] /= inv_Ts ;
//k20: 19
//drr_dT[INDEX(19)] = rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id];
//drr_dT[INDEX(19)] /= dT ;
//drr_dT[INDEX(19)] /= inv_Ts ;
//k21: 20
drr_dT[INDEX(20)] = rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id];
drr_dT[INDEX(20)] /= dT ;
drr_dT[INDEX(20)] /= inv_Ts ;
//k22: 21
drr_dT[INDEX(21)] = rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id];
drr_dT[INDEX(21)] /= dT ;
drr_dT[INDEX(21)] /= inv_Ts ;
//k23: 22
//drr_dT[INDEX(22)] = rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id];
//drr_dT[INDEX(22)] /= dT ;
//drr_dT[INDEX(22)] /= inv_Ts ;
}
__device__ void dydt (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ dy, const mechanism_memory * d_mem) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// int NSPECIES = 10;
const int NRATE = 23;
const int NCOOL = 26;
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
// scale related piece
// double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retrieved from d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
const double mdensity = d_mem->density[T_ID];
const double inv_mdensity = 1.0 / mdensity;
const double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
const double * __restrict__ scale = d_mem->scale;
// const double * __restrict__ inv_scale = d_mem->inv_scale;
#pragma unroll
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)]*scale[INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g\n", i, y_in[INDEX(i)], i, scale[INDEX(i)] );
}
#else
#pragma unroll
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)];
}
#endif
evaluate_temperature ( &T_local, &Tge, y_shared, mdensity, rate_data );
interpolate_reaction_rates( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
//# 0: H2_1
dy[INDEX(0)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
//# 1: H2_2
dy[INDEX(1)] = local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
//# 2: H_1
dy[INDEX(2)] = -local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + 2*local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] - 2*local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] - 2*local_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
//# 3: H_2
dy[INDEX(3)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)];
//# 4: H_m0
dy[INDEX(4)] = local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
//# 5: He_1
dy[INDEX(5)] = -local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)];
//# 6: He_2
dy[INDEX(6)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
//# 7: He_3
dy[INDEX(7)] = local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
//# 8: de
dy[INDEX(8)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)];
//# 9: ge
dy[INDEX(9)] = -2.01588*y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(7)] - y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y_shared[S_INDEX(8)], 2) - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y_shared[S_INDEX(8)], 2) - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(9)] - y_shared[S_INDEX(7)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*(y_shared[S_INDEX(3)] + y_shared[S_INDEX(6)] + 4.0*y_shared[S_INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y_shared[S_INDEX(8)]*(T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
dy[INDEX(9)] *= inv_mdensity;
#ifdef SCALE_INPUT
// scaling the dydt vector back to code units
#pragma unroll
for (int i = 0; i< 10; i++){
dy[INDEX(i)] /= scale[INDEX(i)];
}
#endif
/*
if ( T_ID == 0 ){
*d_mem->rhs_call += 1;
printf("t = %0.5g; rhs_call = %d\n", t, *d_mem->rhs_call );
}
*/
/*
if ( T_ID == 0 ){
printf("time = %0.5g, at temp = %0.5g\n", t, T_local);
for (int i = 0; i< 10; i++){
printf("from tid[%d]: dy[%d] = %0.5g, y = %0.5g at t = %0.5g \n", T_ID, i, dy[INDEX(i)], y_in[INDEX(i)], t);
}
}
*/
// printf(" \n");
// }
}
| f85e70da0067326b1f5fcb2b1ddd9585abef7032.cu | #include "header.cuh"
#include "gpu_memory.cuh"
extern __shared__ double y_shared[];
__device__ void interpolate_gamma( cvklu_data *rate_data, double T, double *gamma, double *dgamma_dT )
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
log_temp_out = log(T);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
*gamma = rate_data->g_gammaH2_1[bin_id] + Tdef * (rate_data->g_gammaH2_1[bin_id+1] - rate_data->g_gammaH2_1[bin_id]);
*dgamma_dT = rate_data->g_dgammaH2_1_dT[bin_id] + Tdef * (rate_data->g_dgammaH2_1_dT[bin_id+1] - rate_data->g_dgammaH2_1_dT[bin_id]);
}
__device__ void evaluate_temperature( double* T, double* dTs_ge, double *y, const double mdensity, cvklu_data *rate_data )
{
// iterate temperature to convergence
double t, tnew, tdiff;
double dge, dge_dT;
double gammaH2, dgammaH2_dT, _gammaH2_m1;
int count = 0;
int MAX_ITERATION = 100;
double gamma = 5./3.;
double _gamma_m1 = 1.0 / (gamma - 1.0);
double kb = 1.3806504e-16; // Boltzmann constant [erg/K]
// prepare t, tnew for Newton's iteration
t = *T;
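// t != t is true only when the incoming temperature is NaN; fall back to 1000 K as the initial guess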
if (t != t) t = 1000.0;
tnew = 1.1*t;
tdiff = tnew - t;
while ( tdiff/ tnew > 0.001 ){
// We do Newton's Iteration to calculate the temperature
// Since gammaH2 is dependent on the temperature too!
interpolate_gamma( rate_data, t, &gammaH2, &dgammaH2_dT );
_gammaH2_m1 = 1.0 / (gammaH2 - 1.0);
dge_dT = t*kb*(-y[INDEX(0)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT - y[INDEX(1)]*_gammaH2_m1*_gammaH2_m1*dgammaH2_dT)/(mdensity)
+ kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1 + y[INDEX(4)]*_gamma_m1
+ y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity);
dge = t*kb*(y[INDEX(0)]*_gammaH2_m1 + y[INDEX(1)]*_gammaH2_m1 + y[INDEX(2)]*_gamma_m1 + y[INDEX(3)]*_gamma_m1
+ y[INDEX(4)]*_gamma_m1 + y[INDEX(5)]*_gamma_m1 + y[INDEX(6)]*_gamma_m1 + y[INDEX(7)]*_gamma_m1 + _gamma_m1*y[INDEX(8)])/(mdensity) - y[INDEX(9)];
// dge is the residual in ge at this iteration; the Newton update below drives it toward zero
tnew = t - dge/dge_dT;
count += 1;
tdiff = fabs(t - tnew);
t = tnew;
if (count > MAX_ITERATION){
printf("T[tid = %d] failed to converge (iteration: %d); at T = %0.3g \n", T_ID, count, tnew );
}
if ( t!= t && T_ID == 0){
printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", T_ID, t, count, y[INDEX(9)], gammaH2);
t = 1000.0;
for (int i = 0; i < 10; i++){
printf("y[S_INDEX(%d)] = %0.5g \n", i, y[S_INDEX(i)]);
}
break;
}
}
// update the temperature;
*T = t;
*dTs_ge = 1.0 / dge_dT;
// printf("T[tid = %d] is %0.5g, count = %d; ge = %0.5g, gamma_H2 = %0.5g \n", tid, t, count, y[INDEX(9)], gammaH2);
}
__device__ void interpolate_reaction_rates( double *reaction_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, invTs, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
invTs = 1.0 / temp_out;
// rate_out is a long 1D array
// NRATE is the number of rates required by the solver network
reaction_rates_out[INDEX( 0)] = rate_data->r_k01[bin_id] + Tdef * (rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id]);
reaction_rates_out[INDEX( 1)] = rate_data->r_k02[bin_id] + Tdef * (rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id]);
reaction_rates_out[INDEX( 2)] = rate_data->r_k03[bin_id] + Tdef * (rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id]);
reaction_rates_out[INDEX( 3)] = rate_data->r_k04[bin_id] + Tdef * (rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id]);
reaction_rates_out[INDEX( 4)] = rate_data->r_k05[bin_id] + Tdef * (rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id]);
reaction_rates_out[INDEX( 5)] = rate_data->r_k06[bin_id] + Tdef * (rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id]);
reaction_rates_out[INDEX( 6)] = rate_data->r_k07[bin_id] + Tdef * (rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id]);
reaction_rates_out[INDEX( 7)] = rate_data->r_k08[bin_id] + Tdef * (rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id]);
reaction_rates_out[INDEX( 8)] = rate_data->r_k09[bin_id] + Tdef * (rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id]);
reaction_rates_out[INDEX( 9)] = rate_data->r_k10[bin_id] + Tdef * (rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id]);
reaction_rates_out[INDEX(10)] = rate_data->r_k11[bin_id] + Tdef * (rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id]);
reaction_rates_out[INDEX(11)] = rate_data->r_k12[bin_id] + Tdef * (rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id]);
reaction_rates_out[INDEX(12)] = rate_data->r_k13[bin_id] + Tdef * (rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id]);
reaction_rates_out[INDEX(13)] = rate_data->r_k14[bin_id] + Tdef * (rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id]);
reaction_rates_out[INDEX(14)] = rate_data->r_k15[bin_id] + Tdef * (rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id]);
reaction_rates_out[INDEX(15)] = rate_data->r_k16[bin_id] + Tdef * (rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id]);
reaction_rates_out[INDEX(16)] = rate_data->r_k17[bin_id] + Tdef * (rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id]);
reaction_rates_out[INDEX(17)] = rate_data->r_k18[bin_id] + Tdef * (rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id]);
reaction_rates_out[INDEX(18)] = rate_data->r_k19[bin_id] + Tdef * (rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id]);
//reaction_rates_out[INDEX(19)] = rate_data->r_k20[bin_id] + Tdef * (rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id]);
reaction_rates_out[INDEX(20)] = rate_data->r_k21[bin_id] + Tdef * (rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id]);
reaction_rates_out[INDEX(21)] = rate_data->r_k22[bin_id] + Tdef * (rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id]);
//reaction_rates_out[INDEX(22)] = rate_data->r_k23[bin_id] + Tdef * (rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id]);
}
__device__ void interpolate_cooling_rates( double *cooling_rates_out, double temp_out, cvklu_data *rate_data)
{
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
/*
if (T_ID == 0){
printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out);
}
*/
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
// rate_out is a long 1D array
// NRATE is the number of rates required by the solver network
cooling_rates_out[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id] + Tdef * (rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id]);
cooling_rates_out[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id] + Tdef * (rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id]);
cooling_rates_out[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id] + Tdef * (rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id]);
cooling_rates_out[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id] + Tdef * (rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id]);
cooling_rates_out[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id] + Tdef * (rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id]);
cooling_rates_out[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id] + Tdef * (rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id]);
cooling_rates_out[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id] + Tdef * (rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id]);
cooling_rates_out[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id] + Tdef * (rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id]);
cooling_rates_out[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id] + Tdef * (rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id]);
cooling_rates_out[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id] + Tdef * (rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id]);
cooling_rates_out[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id] + Tdef * (rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id]);
cooling_rates_out[INDEX(11)] = rate_data->c_brem_brem[bin_id] + Tdef * (rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id]);
cooling_rates_out[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id]);
cooling_rates_out[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id] + Tdef * (rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id]);
cooling_rates_out[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id]);
cooling_rates_out[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id] + Tdef * (rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id]);
cooling_rates_out[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id] + Tdef * (rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id]);
cooling_rates_out[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id] + Tdef * (rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id]);
cooling_rates_out[INDEX(18)] = rate_data->c_compton_comp_[bin_id] + Tdef * (rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id]);
cooling_rates_out[INDEX(19)] = rate_data->c_gammah_gammah[bin_id] + Tdef * (rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id]);
cooling_rates_out[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id] + Tdef * (rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id]);
cooling_rates_out[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id] + Tdef * (rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id]);
cooling_rates_out[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id] + Tdef * (rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id]);
cooling_rates_out[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id] + Tdef * (rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id]);
cooling_rates_out[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id] + Tdef * (rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id]);
cooling_rates_out[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
cooling_rates_out[INDEX(26)] = 1.0; //rate_data->c_cie_cooling_cieco[bin_id] + Tdef * (rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id]);
}
__device__ void interpolate_dcrate_dT(double *dcr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, inv_Ts, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
inv_Ts = temp_out;
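// note: despite its name, inv_Ts holds T itself; the cooling rates are tabulated on a log(T) grid,
// so dividing the finite difference by dT (the log-bin width) and then by T applies the chain rule d/dT = (1/T) d/dln(T)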
//ceHI_ceHI: 0
dcr_dT[INDEX( 0)] = rate_data->c_ceHI_ceHI[bin_id+1] - rate_data->c_ceHI_ceHI[bin_id];
dcr_dT[INDEX( 0)] /= dT ;
dcr_dT[INDEX( 0)] /= inv_Ts ;
//ceHeI_ceHeI: 1
dcr_dT[INDEX( 1)] = rate_data->c_ceHeI_ceHeI[bin_id+1] - rate_data->c_ceHeI_ceHeI[bin_id];
dcr_dT[INDEX( 1)] /= dT ;
dcr_dT[INDEX( 1)] /= inv_Ts ;
//ceHeII_ceHeII: 2
dcr_dT[INDEX( 2)] = rate_data->c_ceHeII_ceHeII[bin_id+1] - rate_data->c_ceHeII_ceHeII[bin_id];
dcr_dT[INDEX( 2)] /= dT ;
dcr_dT[INDEX( 2)] /= inv_Ts ;
//ciHeIS_ciHeIS: 3
dcr_dT[INDEX( 3)] = rate_data->c_ciHeIS_ciHeIS[bin_id+1] - rate_data->c_ciHeIS_ciHeIS[bin_id];
dcr_dT[INDEX( 3)] /= dT ;
dcr_dT[INDEX( 3)] /= inv_Ts ;
//ciHI_ciHI: 4
dcr_dT[INDEX( 4)] = rate_data->c_ciHI_ciHI[bin_id+1] - rate_data->c_ciHI_ciHI[bin_id];
dcr_dT[INDEX( 4)] /= dT ;
dcr_dT[INDEX( 4)] /= inv_Ts ;
//ciHeI_ciHeI: 5
dcr_dT[INDEX( 5)] = rate_data->c_ciHeI_ciHeI[bin_id+1] - rate_data->c_ciHeI_ciHeI[bin_id];
dcr_dT[INDEX( 5)] /= dT ;
dcr_dT[INDEX( 5)] /= inv_Ts ;
//ciHeII_ciHeII: 6
dcr_dT[INDEX( 6)] = rate_data->c_ciHeII_ciHeII[bin_id+1] - rate_data->c_ciHeII_ciHeII[bin_id];
dcr_dT[INDEX( 6)] /= dT ;
dcr_dT[INDEX( 6)] /= inv_Ts ;
//reHII_reHII: 7
dcr_dT[INDEX( 7)] = rate_data->c_reHII_reHII[bin_id+1] - rate_data->c_reHII_reHII[bin_id];
dcr_dT[INDEX( 7)] /= dT ;
dcr_dT[INDEX( 7)] /= inv_Ts ;
//reHeII1_reHeII1: 8
dcr_dT[INDEX( 8)] = rate_data->c_reHeII1_reHeII1[bin_id+1] - rate_data->c_reHeII1_reHeII1[bin_id];
dcr_dT[INDEX( 8)] /= dT ;
dcr_dT[INDEX( 8)] /= inv_Ts ;
//reHeII2_reHeII2: 9
dcr_dT[INDEX( 9)] = rate_data->c_reHeII2_reHeII2[bin_id+1] - rate_data->c_reHeII2_reHeII2[bin_id];
dcr_dT[INDEX( 9)] /= dT ;
dcr_dT[INDEX( 9)] /= inv_Ts ;
//reHeIII_reHeIII: 10
dcr_dT[INDEX(10)] = rate_data->c_reHeIII_reHeIII[bin_id+1] - rate_data->c_reHeIII_reHeIII[bin_id];
dcr_dT[INDEX(10)] /= dT ;
dcr_dT[INDEX(10)] /= inv_Ts ;
//brem_brem: 11
dcr_dT[INDEX(11)] = rate_data->c_brem_brem[bin_id+1] - rate_data->c_brem_brem[bin_id];
dcr_dT[INDEX(11)] /= dT ;
dcr_dT[INDEX(11)] /= inv_Ts ;
//gloverabel08_gaHI: 12
dcr_dT[INDEX(12)] = rate_data->c_gloverabel08_gaHI[bin_id+1] - rate_data->c_gloverabel08_gaHI[bin_id];
dcr_dT[INDEX(12)] /= dT ;
dcr_dT[INDEX(12)] /= inv_Ts ;
//gloverabel08_gaH2: 13
dcr_dT[INDEX(13)] = rate_data->c_gloverabel08_gaH2[bin_id+1] - rate_data->c_gloverabel08_gaH2[bin_id];
dcr_dT[INDEX(13)] /= dT ;
dcr_dT[INDEX(13)] /= inv_Ts ;
//gloverabel08_gaHe: 14
dcr_dT[INDEX(14)] = rate_data->c_gloverabel08_gaHe[bin_id+1] - rate_data->c_gloverabel08_gaHe[bin_id];
dcr_dT[INDEX(14)] /= dT ;
dcr_dT[INDEX(14)] /= inv_Ts ;
//gloverabel08_gaHp: 15
dcr_dT[INDEX(15)] = rate_data->c_gloverabel08_gaHp[bin_id+1] - rate_data->c_gloverabel08_gaHp[bin_id];
dcr_dT[INDEX(15)] /= dT ;
dcr_dT[INDEX(15)] /= inv_Ts ;
//gloverabel08_gael: 16
dcr_dT[INDEX(16)] = rate_data->c_gloverabel08_gael[bin_id+1] - rate_data->c_gloverabel08_gael[bin_id];
dcr_dT[INDEX(16)] /= dT ;
dcr_dT[INDEX(16)] /= inv_Ts ;
//gloverabel08_h2lte: 17
dcr_dT[INDEX(17)] = rate_data->c_gloverabel08_h2lte[bin_id+1] - rate_data->c_gloverabel08_h2lte[bin_id];
dcr_dT[INDEX(17)] /= dT ;
dcr_dT[INDEX(17)] /= inv_Ts ;
//compton_comp_: 18
dcr_dT[INDEX(18)] = rate_data->c_compton_comp_[bin_id+1] - rate_data->c_compton_comp_[bin_id];
dcr_dT[INDEX(18)] /= dT ;
dcr_dT[INDEX(18)] /= inv_Ts ;
//gammah_gammah: 19
dcr_dT[INDEX(19)] = rate_data->c_gammah_gammah[bin_id+1] - rate_data->c_gammah_gammah[bin_id];
dcr_dT[INDEX(19)] /= dT ;
dcr_dT[INDEX(19)] /= inv_Ts ;
//h2formation_h2mheat: 20
dcr_dT[INDEX(20)] = rate_data->c_h2formation_h2mheat[bin_id+1] - rate_data->c_h2formation_h2mheat[bin_id];
dcr_dT[INDEX(20)] /= dT ;
dcr_dT[INDEX(20)] /= inv_Ts ;
//h2formation_h2mcool: 21
dcr_dT[INDEX(21)] = rate_data->c_h2formation_h2mcool[bin_id+1] - rate_data->c_h2formation_h2mcool[bin_id];
dcr_dT[INDEX(21)] /= dT ;
dcr_dT[INDEX(21)] /= inv_Ts ;
//h2formation_ncrn: 22
dcr_dT[INDEX(22)] = rate_data->c_h2formation_ncrn[bin_id+1] - rate_data->c_h2formation_ncrn[bin_id];
dcr_dT[INDEX(22)] /= dT ;
dcr_dT[INDEX(22)] /= inv_Ts ;
//h2formation_ncrd1: 23
dcr_dT[INDEX(23)] = rate_data->c_h2formation_ncrd1[bin_id+1] - rate_data->c_h2formation_ncrd1[bin_id];
dcr_dT[INDEX(23)] /= dT ;
dcr_dT[INDEX(23)] /= inv_Ts ;
//h2formation_ncrd2: 24
dcr_dT[INDEX(24)] = rate_data->c_h2formation_ncrd2[bin_id+1] - rate_data->c_h2formation_ncrd2[bin_id];
dcr_dT[INDEX(24)] /= dT ;
dcr_dT[INDEX(24)] /= inv_Ts ;
//cie_cooling_cieco: 25
dcr_dT[INDEX(25)] = rate_data->c_cie_cooling_cieco[bin_id+1] - rate_data->c_cie_cooling_cieco[bin_id];
dcr_dT[INDEX(25)] /= dT ;
dcr_dT[INDEX(25)] /= inv_Ts ;
//cie_optical_depth_approx: 26
//dcr_dT[INDEX(26)] = rate_data->c_cie_optical_depth_approx[bin_id+1] - rate_data->c_cie_optical_depth_approx[bin_id];
//dcr_dT[INDEX(26)] /= dT ;
//dcr_dT[INDEX(26)] /= inv_Ts ;
dcr_dT[INDEX(26)] = 0.0;
}
__device__ void interpolate_drrate_dT(double *drr_dT, const double temp_out, cvklu_data *rate_data ){
int tid, bin_id, zbin_id;
double t1, t2;
double Tdef, dT, inv_Ts, log_temp_out;
int no_photo = 0;
double lb = log(rate_data->bounds[0]);
tid = threadIdx.x + blockDim.x * blockIdx.x;
log_temp_out = log(temp_out);
bin_id = (int) ( rate_data->idbin * ( log_temp_out - lb ) );
if ( bin_id <= 0) {
bin_id = 0;
} else if ( bin_id >= rate_data->nbins) {
bin_id = rate_data->nbins - 1;
}
//printf( "bin_id = %d; temp_out = %0.5g \n", bin_id, temp_out[tid]);
t1 = (lb + (bin_id ) * rate_data->dbin);
t2 = (lb + (bin_id + 1) * rate_data->dbin);
Tdef = (log_temp_out - t1)/(t2 - t1);
dT = t2 - t1;
inv_Ts = temp_out;
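// note: despite its name, inv_Ts holds T itself; dividing the finite difference by dT (the log-bin width)
// and then by T converts d/dln(T) into d/dT for the log-interpolated reaction rates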
//k01: 0
drr_dT[INDEX( 0)] = rate_data->r_k01[bin_id+1] - rate_data->r_k01[bin_id];
drr_dT[INDEX( 0)] /= dT ;
drr_dT[INDEX( 0)] /= inv_Ts ;
//k02: 1
drr_dT[INDEX( 1)] = rate_data->r_k02[bin_id+1] - rate_data->r_k02[bin_id];
drr_dT[INDEX( 1)] /= dT ;
drr_dT[INDEX( 1)] /= inv_Ts ;
//k03: 2
drr_dT[INDEX( 2)] = rate_data->r_k03[bin_id+1] - rate_data->r_k03[bin_id];
drr_dT[INDEX( 2)] /= dT ;
drr_dT[INDEX( 2)] /= inv_Ts ;
//k04: 3
drr_dT[INDEX( 3)] = rate_data->r_k04[bin_id+1] - rate_data->r_k04[bin_id];
drr_dT[INDEX( 3)] /= dT ;
drr_dT[INDEX( 3)] /= inv_Ts ;
//k05: 4
drr_dT[INDEX( 4)] = rate_data->r_k05[bin_id+1] - rate_data->r_k05[bin_id];
drr_dT[INDEX( 4)] /= dT ;
drr_dT[INDEX( 4)] /= inv_Ts ;
//k06: 5
drr_dT[INDEX( 5)] = rate_data->r_k06[bin_id+1] - rate_data->r_k06[bin_id];
drr_dT[INDEX( 5)] /= dT ;
drr_dT[INDEX( 5)] /= inv_Ts ;
//k07: 6
drr_dT[INDEX( 6)] = rate_data->r_k07[bin_id+1] - rate_data->r_k07[bin_id];
drr_dT[INDEX( 6)] /= dT ;
drr_dT[INDEX( 6)] /= inv_Ts ;
//k08: 7
drr_dT[INDEX( 7)] = rate_data->r_k08[bin_id+1] - rate_data->r_k08[bin_id];
drr_dT[INDEX( 7)] /= dT ;
drr_dT[INDEX( 7)] /= inv_Ts ;
//k09: 8
drr_dT[INDEX( 8)] = rate_data->r_k09[bin_id+1] - rate_data->r_k09[bin_id];
drr_dT[INDEX( 8)] /= dT ;
drr_dT[INDEX( 8)] /= inv_Ts ;
//k10: 9
drr_dT[INDEX( 9)] = rate_data->r_k10[bin_id+1] - rate_data->r_k10[bin_id];
drr_dT[INDEX( 9)] /= dT ;
drr_dT[INDEX( 9)] /= inv_Ts ;
//k11: 10
drr_dT[INDEX(10)] = rate_data->r_k11[bin_id+1] - rate_data->r_k11[bin_id];
drr_dT[INDEX(10)] /= dT ;
drr_dT[INDEX(10)] /= inv_Ts ;
//k12: 11
drr_dT[INDEX(11)] = rate_data->r_k12[bin_id+1] - rate_data->r_k12[bin_id];
drr_dT[INDEX(11)] /= dT ;
drr_dT[INDEX(11)] /= inv_Ts ;
//k13: 12
drr_dT[INDEX(12)] = rate_data->r_k13[bin_id+1] - rate_data->r_k13[bin_id];
drr_dT[INDEX(12)] /= dT ;
drr_dT[INDEX(12)] /= inv_Ts ;
//k14: 13
drr_dT[INDEX(13)] = rate_data->r_k14[bin_id+1] - rate_data->r_k14[bin_id];
drr_dT[INDEX(13)] /= dT ;
drr_dT[INDEX(13)] /= inv_Ts ;
//k15: 14
drr_dT[INDEX(14)] = rate_data->r_k15[bin_id+1] - rate_data->r_k15[bin_id];
drr_dT[INDEX(14)] /= dT ;
drr_dT[INDEX(14)] /= inv_Ts ;
//k16: 15
drr_dT[INDEX(15)] = rate_data->r_k16[bin_id+1] - rate_data->r_k16[bin_id];
drr_dT[INDEX(15)] /= dT ;
drr_dT[INDEX(15)] /= inv_Ts ;
//k17: 16
drr_dT[INDEX(16)] = rate_data->r_k17[bin_id+1] - rate_data->r_k17[bin_id];
drr_dT[INDEX(16)] /= dT ;
drr_dT[INDEX(16)] /= inv_Ts ;
//k18: 17
drr_dT[INDEX(17)] = rate_data->r_k18[bin_id+1] - rate_data->r_k18[bin_id];
drr_dT[INDEX(17)] /= dT ;
drr_dT[INDEX(17)] /= inv_Ts ;
//k19: 18
drr_dT[INDEX(18)] = rate_data->r_k19[bin_id+1] - rate_data->r_k19[bin_id];
drr_dT[INDEX(18)] /= dT ;
drr_dT[INDEX(18)] /= inv_Ts ;
//k20: 19
//drr_dT[INDEX(19)] = rate_data->r_k20[bin_id+1] - rate_data->r_k20[bin_id];
//drr_dT[INDEX(19)] /= dT ;
//drr_dT[INDEX(19)] /= inv_Ts ;
//k21: 20
drr_dT[INDEX(20)] = rate_data->r_k21[bin_id+1] - rate_data->r_k21[bin_id];
drr_dT[INDEX(20)] /= dT ;
drr_dT[INDEX(20)] /= inv_Ts ;
//k22: 21
drr_dT[INDEX(21)] = rate_data->r_k22[bin_id+1] - rate_data->r_k22[bin_id];
drr_dT[INDEX(21)] /= dT ;
drr_dT[INDEX(21)] /= inv_Ts ;
//k23: 22
//drr_dT[INDEX(22)] = rate_data->r_k23[bin_id+1] - rate_data->r_k23[bin_id];
//drr_dT[INDEX(22)] /= dT ;
//drr_dT[INDEX(22)] /= inv_Ts ;
}
__device__ void dydt (const double t, const double pres, const double * __restrict__ y_in, double * __restrict__ dy, const mechanism_memory * d_mem) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// int NSPECIES = 10;
const int NRATE = 23;
const int NCOOL = 26;
double * local_reaction_rates = d_mem->reaction_rates;
double * local_cooling_rates = d_mem->cooling_rates ;
// scale related piece
// double * y = d_mem->temp_array; // working space for scaling the variable back;
cvklu_data *rate_data = d_mem->chemistry_data;
// these should be retrieved from the d_mem object
double T_local = d_mem->temperature[T_ID];
double Tge = d_mem->dTs_ge[T_ID];
const double mdensity = d_mem->density[T_ID];
const double inv_mdensity = 1.0 / mdensity;
const double h2_optical_depth_approx = d_mem->h2_optical_depth_approx[T_ID];
// scaling the input vector back to cgs units
#ifdef SCALE_INPUT
const double * __restrict__ scale = d_mem->scale;
// const double * __restrict__ inv_scale = d_mem->inv_scale;
#pragma unroll
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)]*scale[INDEX(i)];
// printf( "y_in[%d] = %0.5g; scale[%d] = %0.5g\n", i, y_in[INDEX(i)], i, scale[INDEX(i)] );
}
#else
#pragma unroll
for (int i = 0; i < 10; i++){
y_shared[S_INDEX(i)] = y_in[INDEX(i)];
}
#endif
evaluate_temperature ( &T_local, &Tge, y_shared, mdensity, rate_data );
interpolate_reaction_rates( local_reaction_rates, T_local, rate_data);
interpolate_cooling_rates ( local_cooling_rates , T_local, rate_data);
//# 0: H2_1
dy[INDEX(0)] = local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
//# 1: H2_2
dy[INDEX(1)] = local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
//# 2: H_1
dy[INDEX(2)] = -local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] + 2*local_reaction_rates[INDEX(11)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(8)] + 2*local_reaction_rates[INDEX(12)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + 2*local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] + 2*local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)] - 2*local_reaction_rates[INDEX(20)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)] - 2*local_reaction_rates[INDEX(21)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(2)];
//# 3: H_2
dy[INDEX(3)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(8)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(3)] + local_reaction_rates[INDEX(9)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(2)] - local_reaction_rates[INDEX(10)]*y_shared[S_INDEX(0)]*y_shared[S_INDEX(3)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)];
//# 4: H_m0
dy[INDEX(4)] = local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(15)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(18)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(4)];
//# 5: He_1
dy[INDEX(5)] = -local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)];
//# 6: He_2
dy[INDEX(6)] = local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
//# 7: He_3
dy[INDEX(7)] = local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)];
//# 8: de
dy[INDEX(8)] = local_reaction_rates[INDEX(0)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(1)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(2)]*y_shared[S_INDEX(5)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(3)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(4)]*y_shared[S_INDEX(6)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(5)]*y_shared[S_INDEX(7)]*y_shared[S_INDEX(8)] - local_reaction_rates[INDEX(6)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(7)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(13)]*y_shared[S_INDEX(4)]*y_shared[S_INDEX(8)] + local_reaction_rates[INDEX(14)]*y_shared[S_INDEX(2)]*y_shared[S_INDEX(4)] + local_reaction_rates[INDEX(16)]*y_shared[S_INDEX(3)]*y_shared[S_INDEX(4)] - local_reaction_rates[INDEX(17)]*y_shared[S_INDEX(1)]*y_shared[S_INDEX(8)];
//# 9: ge
dy[INDEX(9)] = -2.01588*y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(25)]*local_cooling_rates[INDEX(26)]*mdensity - y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(17)]*h2_optical_depth_approx/(local_cooling_rates[INDEX(17)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(13)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(12)] + y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(15)] + y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(14)] + y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(16)]) + 1.0) - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(0)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(4)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(3)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(7)] - y_shared[S_INDEX(5)]*local_cooling_rates[INDEX(5)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(2)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(1)]*local_cooling_rates[INDEX(26)]*pow(y_shared[S_INDEX(8)], 2) - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(6)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(3)]*local_cooling_rates[INDEX(26)]*pow(y_shared[S_INDEX(8)], 2) - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(8)] - y_shared[S_INDEX(6)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(9)] - y_shared[S_INDEX(7)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*local_cooling_rates[INDEX(10)] - local_cooling_rates[INDEX(11)]*local_cooling_rates[INDEX(26)]*y_shared[S_INDEX(8)]*(y_shared[S_INDEX(3)] + y_shared[S_INDEX(6)] + 4.0*y_shared[S_INDEX(7)]) - local_cooling_rates[INDEX(26)]*local_cooling_rates[INDEX(18)]*y_shared[S_INDEX(8)]*(T_local - 2.73) + 0.5*1.0/(local_cooling_rates[INDEX(22)]/(y_shared[S_INDEX(0)]*local_cooling_rates[INDEX(24)] + y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(23)]) + 1.0)*(-y_shared[S_INDEX(0)]*y_shared[S_INDEX(2)]*local_cooling_rates[INDEX(21)] + pow(y_shared[S_INDEX(2)], 3)*local_cooling_rates[INDEX(20)]);
dy[INDEX(9)] *= inv_mdensity;
#ifdef SCALE_INPUT
// scaling the dydt vector back to code units
#pragma unroll
for (int i = 0; i< 10; i++){
dy[INDEX(i)] /= scale[INDEX(i)];
}
#endif
/*
if ( T_ID == 0 ){
*d_mem->rhs_call += 1;
printf("t = %0.5g; rhs_call = %d\n", t, *d_mem->rhs_call );
}
*/
/*
if ( T_ID == 0 ){
printf("time = %0.5g, at temp = %0.5g\n", t, T_local);
for (int i = 0; i< 10; i++){
printf("from tid[%d]: dy[%d] = %0.5g, y = %0.5g at t = %0.5g \n", T_ID, i, dy[INDEX(i)], y_in[INDEX(i)], t);
}
}
*/
// printf(" \n");
// }
}
|
ebbe7f2c09de87cadfa2ca486fa4b71182d123fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
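// keep inputs whose random mask value exceeds the threshold; scale compensates for the dropped fraction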
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
#endif // USE_ROCM
template<typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data());
greentea_gpu_rng_uniform(this->device_->id(), count, mask, 0);
// set thresholds
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_forward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
}
#endif // USE_GREENTEA
}
}
#ifdef USE_ROCM
template<typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
#endif // USE_ROCM
template<typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (this->phase_ == TRAIN) {
const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_
.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.gpu_data());
const int count = bottom[0]->count();
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_backward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(top[0]->count(), (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0, &ctx);
}
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
| ebbe7f2c09de87cadfa2ca486fa4b71182d123fd.cu | #include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
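// keep inputs whose random mask value exceeds the threshold; scale compensates for the dropped fraction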
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
#endif // USE_CUDA
template<typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.mutable_gpu_data());
greentea_gpu_rng_uniform(this->device_->id(), count, mask, 0);
// set thresholds
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_forward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(count, (cl_mem) bottom_data, 0, (cl_mem) top_data, 0,
&ctx);
}
#endif // USE_GREENTEA
}
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask,
const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
#endif // USE_CUDA
template<typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (this->phase_ == TRAIN) {
const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_
.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
if (this->phase_ == TRAIN) {
cl_mem mask = (cl_mem) (rand_vec_.gpu_data());
const int count = bottom[0]->count();
viennacl::ocl::kernel &oclk_dropout = program.get_kernel(
CL_KERNEL_SELECT("dropout_backward"));
viennacl::ocl::enqueue(
oclk_dropout(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle(mask, &ctx), uint_thres_, scale_,
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
} else {
greentea_copy<Dtype>(top[0]->count(), (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0, &ctx);
}
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
f4311663a3442ff9d9f2351266d0fefa530866f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "delta_hidden.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *prime_ji = NULL;
hipMalloc(&prime_ji, XSIZE*YSIZE);
float *delta_i = NULL;
hipMalloc(&delta_i, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
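// round the launch extents up to the next multiple of the block dimensions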
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(delta_hidden, dim3(gridBlock), dim3(threadBlock), 0, 0, prime_ji, delta_i);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(delta_hidden, dim3(gridBlock), dim3(threadBlock), 0, 0, prime_ji, delta_i);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(delta_hidden, dim3(gridBlock), dim3(threadBlock), 0, 0, prime_ji, delta_i);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f4311663a3442ff9d9f2351266d0fefa530866f1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "delta_hidden.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *prime_ji = NULL;
cudaMalloc(&prime_ji, XSIZE*YSIZE);
float *delta_i = NULL;
cudaMalloc(&delta_i, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
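// round the launch extents up to the next multiple of the block dimensions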
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
delta_hidden<<<gridBlock,threadBlock>>>(prime_ji,delta_i);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
delta_hidden<<<gridBlock,threadBlock>>>(prime_ji,delta_i);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
delta_hidden<<<gridBlock,threadBlock>>>(prime_ji,delta_i);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fe1196f743ea8f4150cf90b457376df85bf05849.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < n) {
int index = indeces[tid];
d_out[tid] = d_in[index];
}
} | fe1196f743ea8f4150cf90b457376df85bf05849.cu | #include "includes.h"
__global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < n) {
int index = indeces[tid];
d_out[tid] = d_in[index];
}
} |
107e7693cf23a95ca1c01d6642da0fa1e01e9edb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[56,1,1] --blockDim=[256,1,1]
template<class T>
struct SharedMemory
{
__device__ __attribute__((always_inline)) inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ __attribute__((always_inline)) inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
template<typename T>
__device__ static __attribute__((always_inline)) T rsqrt_T(T x)
{
return rsqrt(x);
}
template<>
__device__ static __attribute__((always_inline)) float rsqrt_T<float>(float x)
{
return rsqrtf(x);
}
template <typename T> struct vec3
{
typedef float Type;
}; // dummy
template <> struct vec3<float>
{
typedef float3 Type;
};
template <> struct vec3<double>
{
typedef double3 Type;
};
template <typename T> struct vec4
{
typedef float Type;
}; // dummy
template <> struct vec4<float>
{
typedef float4 Type;
};
template <> struct vec4<double>
{
typedef double4 Type;
};
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
template <typename T>
__device__ static __attribute__((always_inline)) T getSofteningSquared()
{
return softeningSquared;
}
template <>
__device__ static __attribute__((always_inline)) double getSofteningSquared<double>()
{
return softeningSquared_fp64;
}
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
template <typename T>
__device__ static __attribute__((always_inline)) typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
typename vec4<T>::Type bi,
typename vec4<T>::Type bj)
{
typename vec3<T>::Type r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += getSofteningSquared<T>();
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = rsqrt_T(distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = bj.w * invDistCube;
// a_i = a_i + s * r_ij [6 FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai;
}
// This is the "tile_calculation" function from the GPUG3 article.
template <typename T>
__device__ static __attribute__((always_inline)) typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos,
typename vec3<T>::Type accel)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
// The CUDA 1.1 compiler cannot determine that i is not going to
// overflow in the loop below. Therefore if int is used on 64-bit linux
// or windows (or long instead of long long on win64), the compiler
// generates suboptimal code. Therefore we use long long on win64 and
// long on everything else. (Workaround for Bug ID 347697)
#ifdef _Win64
unsigned long long j = 0;
#else
unsigned long j = 0;
#endif
// Here we unroll the loop to reduce bookkeeping instruction overhead
// 32x unrolling seems to provide best performance
// Note that having an unsigned int loop counter and an unsigned
// long index helps the compiler generate efficient code on 64-bit
// OSes. The compiler can't assume the 64-bit index won't overflow
// so it incurs extra integer operations. This is a standard issue
// in porting 32-bit code to 64-bit OSes.
#pragma unroll 32
for (unsigned int counter = 0; counter < blockDim.x; counter++)
{
accel = bodyBodyInteraction<T>(accel, iPos, SX(j++));
}
return accel;
}
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at
// once.
#define WRAP(x,m) (((x)<(m))?(x):((x)-(m))) // Mod without divide, works on values from 0 up to 2m
template <typename T, bool multithreadBodies>
__device__ static __attribute__((always_inline)) typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type *positions,
int numBodies)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
int p = blockDim.x;
int q = blockDim.y;
int n = numBodies;
int numTiles = n / (p * q);
extern __shared__ int __smem[]; // ALLY: Added to allow invariants to talk about this
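// each iteration stages one shared-memory tile of body positions and accumulates its contribution to this thread's acceleration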
for (int tile = blockIdx.y;
__global_invariant(__implies(__enabled() & __same_group, __uniform_int(tile))),
__invariant(__no_read(__smem)),
tile < numTiles + blockIdx.y; tile++)
{
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] =
multithreadBodies ?
positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] :
positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
__syncthreads();
// This is the "tile_calculation" function from the GPUG3 article.
acc = gravitation<T>(bodyPos, acc);
__syncthreads();
}
// When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
// underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
// be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
// threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
// of p threads each. Each thread processes 1/q of the forces that affect each body, and then
// 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
// threads for that body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
// threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads per body is greater
// than one, so that when it is not we don't have to execute the more complex code required!
if (multithreadBodies)
{
SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
__syncthreads();
// Save the result in global memory for the integration step
if (threadIdx.y == 0)
{
for (int i = 1; i < blockDim.y; i++)
{
acc.x += SX_SUM(threadIdx.x,i).x;
acc.y += SX_SUM(threadIdx.x,i).y;
acc.z += SX_SUM(threadIdx.x,i).z;
}
}
}
return acc;
}
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type *newPos,
typename vec4<T>::Type *oldPos,
typename vec4<T>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies);
template
__global__ void
integrateBodies<float, false>(
vec4<float>::Type *newPos,
vec4<float>::Type *oldPos,
vec4<float>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies);
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type *newPos,
typename vec4<T>::Type *oldPos,
typename vec4<T>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies)
{
__requires(deviceNumBodies == 54*256); // ALLY: This has to be a multiple of blockDim.x, otherwise the kernel has barrier divergence. To make things interesting I have made it slightly less than the total number of threads so that some thread blocks do not execute.
#ifdef FORCE_FAIL
__ensures(false);
#endif
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= deviceNumBodies)
{
return;
}
typename vec4<T>::Type position = oldPos[deviceOffset + index];
typename vec3<T>::Type accel = computeBodyAccel<T, multithreadBodies>(position, oldPos, totalNumBodies);
if (!multithreadBodies || (threadIdx.y == 0))
{
// acceleration = force \ mass;
// new velocity = old velocity + acceleration * deltaTime
// note we factor out the body's mass from the equation, here and in bodyBodyInteraction
// (because they cancel out). Thus here force == acceleration
typename vec4<T>::Type velocity = vel[deviceOffset + index];
velocity.x += accel.x * deltaTime;
velocity.y += accel.y * deltaTime;
velocity.z += accel.z * deltaTime;
velocity.x *= damping;
velocity.y *= damping;
velocity.z *= damping;
// new position = old position + velocity * deltaTime
position.x += velocity.x * deltaTime;
position.y += velocity.y * deltaTime;
position.z += velocity.z * deltaTime;
// store new position and velocity
newPos[deviceOffset + index] = position;
vel[deviceOffset + index] = velocity;
}
}
| 107e7693cf23a95ca1c01d6642da0fa1e01e9edb.cu | //pass
//--gridDim=[56,1,1] --blockDim=[256,1,1]
template<class T>
struct SharedMemory
{
__device__ __attribute__((always_inline)) inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ __attribute__((always_inline)) inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
template<typename T>
__device__ static __attribute__((always_inline)) T rsqrt_T(T x)
{
return rsqrt(x);
}
template<>
__device__ static __attribute__((always_inline)) float rsqrt_T<float>(float x)
{
return rsqrtf(x);
}
template <typename T> struct vec3
{
typedef float Type;
}; // dummy
template <> struct vec3<float>
{
typedef float3 Type;
};
template <> struct vec3<double>
{
typedef double3 Type;
};
template <typename T> struct vec4
{
typedef float Type;
}; // dummy
template <> struct vec4<float>
{
typedef float4 Type;
};
template <> struct vec4<double>
{
typedef double4 Type;
};
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
template <typename T>
__device__ static __attribute__((always_inline)) T getSofteningSquared()
{
return softeningSquared;
}
template <>
__device__ static __attribute__((always_inline)) double getSofteningSquared<double>()
{
return softeningSquared_fp64;
}
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
template <typename T>
__device__ static __attribute__((always_inline)) typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
typename vec4<T>::Type bi,
typename vec4<T>::Type bj)
{
typename vec3<T>::Type r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += getSofteningSquared<T>();
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = rsqrt_T(distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = bj.w * invDistCube;
// a_i = a_i + s * r_ij [6 FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai;
}
// This is the "tile_calculation" function from the GPUG3 article.
template <typename T>
__device__ static __attribute__((always_inline)) typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos,
typename vec3<T>::Type accel)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
// The CUDA 1.1 compiler cannot determine that i is not going to
// overflow in the loop below. Therefore if int is used on 64-bit linux
// or windows (or long instead of long long on win64), the compiler
// generates suboptimal code. Therefore we use long long on win64 and
// long on everything else. (Workaround for Bug ID 347697)
#ifdef _WIN64
unsigned long long j = 0;
#else
unsigned long j = 0;
#endif
// Here we unroll the loop to reduce bookkeeping instruction overhead
// 32x unrolling seems to provide best performance
// Note that having an unsigned int loop counter and an unsigned
// long index helps the compiler generate efficient code on 64-bit
// OSes. The compiler can't assume the 64-bit index won't overflow
// so it incurs extra integer operations. This is a standard issue
// in porting 32-bit code to 64-bit OSes.
#pragma unroll 32
for (unsigned int counter = 0; counter < blockDim.x; counter++)
{
accel = bodyBodyInteraction<T>(accel, iPos, SX(j++));
}
return accel;
}
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at
// once.
#define WRAP(x,m) (((x)<(m))?(x):((x)-(m))) // Mod without divide, works on values from 0 up to 2m
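// Worked example (added): with m = 4, WRAP(3,4) = 3 and WRAP(5,4) = 1, i.e. it behaves like
// x % m as long as the caller guarantees x < 2m, as the comment above notes; outside that
// range the subtraction is applied only once and the result is no longer a modulo.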
template <typename T, bool multithreadBodies>
__device__ static __attribute__((always_inline)) typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type *positions,
int numBodies)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
int p = blockDim.x;
int q = blockDim.y;
int n = numBodies;
int numTiles = n / (p * q);
extern __shared__ int __smem[]; // ALLY: Added to allow invariants to talk about this
for (int tile = blockIdx.y;
__global_invariant(__implies(__enabled() & __same_group, __uniform_int(tile))),
__invariant(__no_read(__smem)),
tile < numTiles + blockIdx.y; tile++)
{
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] =
multithreadBodies ?
positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] :
positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
__syncthreads();
// This is the "tile_calculation" function from the GPUG3 article.
acc = gravitation<T>(bodyPos, acc);
__syncthreads();
}
// When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
// underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
// be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
// threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
// of p threads each. Each thread processes 1/q of the forces that affect each body, and then
// 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
// threads for that body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
// threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads per body is greater
// than one, so that when it is not we don't have to execute the more complex code required!
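    // Added numeric sketch of the comment above (same n=1024, p=64, q=4 example): blockDim is
    // (64,4) and numTiles = 1024/(64*4) = 4, so each of the q=4 rows of a block accumulates the
    // partial force from a different quarter of the bodies; the rows park their partial sums in
    // shared memory via SX_SUM and the threadIdx.y==0 row folds them together below.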
if (multithreadBodies)
{
SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
__syncthreads();
// Save the result in global memory for the integration step
if (threadIdx.y == 0)
{
for (int i = 1; i < blockDim.y; i++)
{
acc.x += SX_SUM(threadIdx.x,i).x;
acc.y += SX_SUM(threadIdx.x,i).y;
acc.z += SX_SUM(threadIdx.x,i).z;
}
}
}
return acc;
}
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type *newPos,
typename vec4<T>::Type *oldPos,
typename vec4<T>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies);
template
__global__ void
integrateBodies<float, false>(
vec4<float>::Type *newPos,
vec4<float>::Type *oldPos,
vec4<float>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies);
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type *newPos,
typename vec4<T>::Type *oldPos,
typename vec4<T>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies)
{
__requires(deviceNumBodies == 54*256); // ALLY: This has to be a multiple of blockDim.x, otherwise the kernel has barrier divergence. To make things interesting I have made it slightly less than the total number of threads so that some thread blocks do not execute.
#ifdef FORCE_FAIL
__ensures(false);
#endif
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= deviceNumBodies)
{
return;
}
typename vec4<T>::Type position = oldPos[deviceOffset + index];
typename vec3<T>::Type accel = computeBodyAccel<T, multithreadBodies>(position, oldPos, totalNumBodies);
if (!multithreadBodies || (threadIdx.y == 0))
{
// acceleration = force \ mass;
// new velocity = old velocity + acceleration * deltaTime
// note we factor out the body's mass from the equation, here and in bodyBodyInteraction
// (because they cancel out). Thus here force == acceleration
typename vec4<T>::Type velocity = vel[deviceOffset + index];
velocity.x += accel.x * deltaTime;
velocity.y += accel.y * deltaTime;
velocity.z += accel.z * deltaTime;
velocity.x *= damping;
velocity.y *= damping;
velocity.z *= damping;
// new position = old position + velocity * deltaTime
position.x += velocity.x * deltaTime;
position.y += velocity.y * deltaTime;
position.z += velocity.z * deltaTime;
// store new position and velocity
newPos[deviceOffset + index] = position;
vel[deviceOffset + index] = velocity;
}
}
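// Hypothetical host-side launch (added sketch, not part of the original file): the GPUVerify
// header above fixes gridDim=[56,1,1] and blockDim=[256,1,1], and the precondition fixes
// deviceNumBodies to 54*256, so a matching launch of the <float,false> instantiation could
// look like:
//   dim3 block(256, 1);
//   dim3 grid(56, 1);
//   size_t shmem = block.x * block.y * sizeof(float4); // one float4 tile entry per thread
//   integrateBodies<float, false><<<grid, block, shmem>>>(newPos, oldPos, vel,
//                                                         0, 54*256, dt, damping, 56*256);
// where newPos/oldPos/vel, dt, damping and the 56*256 total are assumed device buffers and
// parameters, not values taken from this file.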
|
8a2f999e9120817613015c66c7adc77dd3d3c6a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sqr_mag_kernel(const float *data, float *result, int total)
{
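    // Added note: judging by the stride-2 indexing below, data/result hold `total` complex
    // values as interleaved (re, im) float pairs; each thread writes |z|^2 = re^2 + im^2 into
    // the real slot of its pair and zeroes the imaginary slot.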
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
if (idx / 2 < total) {
result[idx] = data[idx] * data[idx] + data[idx + 1] * data[idx + 1];
result[idx + 1] = 0;
}
} | 8a2f999e9120817613015c66c7adc77dd3d3c6a2.cu | #include "includes.h"
__global__ void sqr_mag_kernel(const float *data, float *result, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
if (idx / 2 < total) {
result[idx] = data[idx] * data[idx] + data[idx + 1] * data[idx + 1];
result[idx + 1] = 0;
}
} |
8bf07ed009368064c53044cd2a5cd2bda819f5be.hip | // !!! This is a file automatically generated by hipify!!!
#include "activation_function_cudaops.cuh"
#include "cuda/linalg_cudaops_t.cuh"
using namespace linalg::cuda;
struct sigmoid
{
__device__ float operator()(const float f) const
{
return 1.0f / (1.0f + ::exp(-f));
}
};
void annlib::cuda::cuda_sigmoid_apply(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, sigmoid());
}
struct sigmoid_derivative
{
__device__ float operator()(const float f) const
{
const float e_abs = std::abs(f);
if (e_abs > 5.0f)
return 1.0f / ::exp(e_abs);
const float v = ::exp(f) + 1.0f;
return ::exp(f) / (v * v);
}
};
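// Added derivation sketch for the branches above: for s(x) = 1/(1+e^(-x)) the derivative is
// s'(x) = s(x)*(1-s(x)) = e^x / (e^x + 1)^2, which is what the general branch computes.
// For |x| > 5, e^x/(e^x+1)^2 is approximately e^(-|x|) (the +1 is negligible next to e^|x|),
// so the early-out branch avoids overflowing exp(x) for large positive x at the cost of a
// small relative error (about 1% at |x| = 5).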
void annlib::cuda::cuda_sigmoid_apply_derivative(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, sigmoid_derivative());
}
struct relu
{
__device__ float operator()(const float f) const
{
return ::max(0.0f, f);
}
};
void annlib::cuda::cuda_relu_apply(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, relu());
}
struct relu_derivative
{
__device__ float operator()(const float f) const
{
return f > 0 ? 1.0f : 0.0f;
}
};
void annlib::cuda::cuda_relu_apply_derivative(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, relu_derivative());
}
| 8bf07ed009368064c53044cd2a5cd2bda819f5be.cu | #include "activation_function_cudaops.cuh"
#include "cuda/linalg_cudaops_t.cuh"
using namespace linalg::cuda;
struct sigmoid
{
__device__ float operator()(const float f) const
{
return 1.0f / (1.0f + std::exp(-f));
}
};
void annlib::cuda::cuda_sigmoid_apply(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, sigmoid());
}
struct sigmoid_derivative
{
__device__ float operator()(const float f) const
{
const float e_abs = std::abs(f);
if (e_abs > 5.0f)
return 1.0f / std::exp(e_abs);
const float v = std::exp(f) + 1.0f;
return std::exp(f) / (v * v);
}
};
void annlib::cuda::cuda_sigmoid_apply_derivative(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, sigmoid_derivative());
}
struct relu
{
__device__ float operator()(const float f) const
{
return std::max(0.0f, f);
}
};
void annlib::cuda::cuda_relu_apply(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, relu());
}
struct relu_derivative
{
__device__ float operator()(const float f) const
{
return f > 0 ? 1.0f : 0.0f;
}
};
void annlib::cuda::cuda_relu_apply_derivative(const mat_arr& in, mat_arr* target)
{
cuda_element_wise_operation(in, target, relu_derivative());
}
|
1ae8f2664fd827b374947cc97513062e33b66d27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#define ARRAY_SIZE 5
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// Kernel definition
__global__ void addKernel(int* d_a, int* d_b, int* d_c) {
int i = threadIdx.x;
d_c[i] = d_a[i] + d_b[i];
}
void onDevice(int* h_a, int* h_b, int* h_c) {
    int *d_a, *d_b, *d_c;
    // allocate memory on the device for the two inputs and the output
    hipMalloc((void**)&d_a, ARRAY_BYTES);
    hipMalloc((void**)&d_b, ARRAY_BYTES);
    hipMalloc((void**)&d_c, ARRAY_BYTES);
    // copy the inputs to the device; the kernel must not dereference host pointers
    hipMemcpy(d_a, h_a, ARRAY_BYTES, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, ARRAY_BYTES, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( addKernel), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_a, d_b, d_c);
    // Copy memory from Device to Host (h_c already points at the host buffer, so no &)
    hipMemcpy(h_c, d_c, ARRAY_BYTES, hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
void onHost() {
int h_a[ARRAY_SIZE];
int h_b[ARRAY_SIZE];
int* h_c;
// allocate memory on the host
h_c = (int*)malloc(ARRAY_BYTES);
for (int i = 0; i < ARRAY_SIZE; i++) {
h_a[i] = 1;
h_b[i] = 1;
h_c[i] = 0;
}
onDevice(h_a, h_b, h_c);
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%i \n", h_a[0]);
assert(h_a[i] + h_b[i] == h_c[i]);
}
printf("-: successful execution :-\n");
free(h_c);
}
int main() {
onHost();
return 0;
}
| 1ae8f2664fd827b374947cc97513062e33b66d27.cu | #include <assert.h>
#include <stdio.h>
#define ARRAY_SIZE 5
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// Kernel definition
__global__ void addKernel(int* d_a, int* d_b, int* d_c) {
int i = threadIdx.x;
d_c[i] = d_a[i] + d_b[i];
}
void onDevice(int* h_a, int* h_b, int* h_c) {
    int *d_a, *d_b, *d_c;
    // allocate memory on the device for the two inputs and the output
    cudaMalloc((void**)&d_a, ARRAY_BYTES);
    cudaMalloc((void**)&d_b, ARRAY_BYTES);
    cudaMalloc((void**)&d_c, ARRAY_BYTES);
    // copy the inputs to the device; the kernel must not dereference host pointers
    cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice);
    addKernel<<<1, ARRAY_SIZE>>>(d_a, d_b, d_c);
    // Copy memory from Device to Host (h_c already points at the host buffer, so no &)
    cudaMemcpy(h_c, d_c, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
void onHost() {
int h_a[ARRAY_SIZE];
int h_b[ARRAY_SIZE];
int* h_c;
// allocate memory on the host
h_c = (int*)malloc(ARRAY_BYTES);
for (int i = 0; i < ARRAY_SIZE; i++) {
h_a[i] = 1;
h_b[i] = 1;
h_c[i] = 0;
}
onDevice(h_a, h_b, h_c);
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%i \n", h_a[0]);
assert(h_a[i] + h_b[i] == h_c[i]);
}
printf("-: successful execution :-\n");
free(h_c);
}
int main() {
onHost();
return 0;
}
|
391f098e20f56395547448753bc1fcbca24955a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
__global__ void vec_add(float *a, float *b, float *c, int n)
{
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n)
{
c[global_id] = a[global_id] + b[global_id];
}
}
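// Hypothetical usage (added sketch; kernel.h and the buffer names are assumptions): a plain
// 1-D launch that rounds the grid up so every element gets a thread, e.g.
//   int threads = 256;
//   int blocks = (n + threads - 1) / threads;
//   hipLaunchKernelGGL(vec_add, dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, n);
// with d_a/d_b/d_c device-resident float arrays of length n.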
| 391f098e20f56395547448753bc1fcbca24955a3.cu | #include "kernel.h"
__global__ void vec_add(float *a, float *b, float *c, int n)
{
int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n)
{
c[global_id] = a[global_id] + b[global_id];
}
}
|
5e2403aabc331e1aaf8e5146709051811e38ea07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "trovato_2019.h"
#include <stddef.h>
#include <stdint.h>
__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes, size_t pitch, bool use_adpt_dt, real min_dt) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
real STATES[NEQ];
// Steady-state 40 pulses (BCL=1000ms)
STATES[0] = -86.7099;
STATES[1] = 0.005431;
STATES[2] = 0.000104;
STATES[3] = 8.25533;
STATES[4] = 8.25502;
STATES[5] = 8.25503;
STATES[6] = 143.743;
STATES[7] = 143.744;
STATES[8] = 143.744;
STATES[9] = 4.4e-05;
STATES[10] = 0.000103;
STATES[11] = 1.26947;
STATES[12] = 1.25254;
STATES[13] = 1.27103;
STATES[14] = 1.1e-05;
STATES[15] = 0;
STATES[16] = 0.006303;
STATES[17] = 0.789469;
STATES[18] = 0.789392;
STATES[19] = 0.791301;
STATES[20] = 0.580955;
STATES[21] = 0.791719;
STATES[22] = 0.000241;
STATES[23] = 0.463851;
STATES[24] = 0.239936;
STATES[25] = 0.000272;
STATES[26] = 0.646362;
STATES[27] = 0.98999;
STATES[28] = 0;
STATES[29] = 1;
STATES[30] = 0.926919;
STATES[31] = 1;
STATES[32] = 1;
STATES[33] = 0.999976;
STATES[34] = 1;
STATES[35] = 1;
STATES[36] = 0.005885;
STATES[37] = 0.000303;
STATES[38] = 0.994251;
STATES[39] = 0.000367;
STATES[40] = 0.566131;
STATES[41] = 0.189842;
STATES[42] = 0.000222;
STATES[43] = 0.233515;
STATES[44] = 0.997077;
STATES[45] = 0.471259;
for (int i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = STATES[i];
if(use_adpt_dt) {
*((real *)((char *)sv + pitch * NEQ) + threadID) = min_dt; // dt
*((real *)((char *)sv + pitch * (NEQ + 1)) + threadID) = 0.0; // time_new
*((real *)((char *)sv + pitch * (NEQ + 2)) + threadID) = 0.0; // previous dt
}
}
}
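// Added note on the indexing used above and in RHS_gpu below: `sv` is a pitched 2-D
// allocation with one row per state variable and one column per cell, so state i of cell
// `threadID` lives at byte offset pitch*i from the base plus threadID elements into that row,
// hence the (char*) arithmetic. The three extra rows written when use_adpt_dt is set
// (dt, time_new, previous dt) sit directly after the NEQ state rows.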
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt, size_t pitch, bool use_adpt_dt) {
// Get the stimulus current from the current cell
real calc_I_stim = stim_current;
// State variables
real V;
real CaMKt;
real cass;
real nai;
real nasl;
real nass;
real ki;
real kss;
real ksl;
real cai;
real casl;
real cansr;
real cajsr;
real cacsr;
real Jrel1;
real Jrel2;
real m;
real hf;
real hs;
real j;
real hsp;
real jp;
real mL;
real hL;
real hLp;
real a;
real i1;
real i2;
real d;
real ff;
real fs;
real fcaf;
real fcas;
real jca;
real ffp;
real fcafp;
real nca;
real b;
real g;
real xrf;
real xrs;
real xs1;
real xs2;
real y;
real xk1;
real u;
if (use_adpt_dt)
{
V = sv[0];
CaMKt = sv[1];
cass = sv[2];
nai = sv[3];
nasl = sv[4];
nass = sv[5];
ki = sv[6];
kss = sv[7];
ksl = sv[8];
cai = sv[9];
casl = sv[10];
cansr = sv[11];
cajsr = sv[12];
cacsr = sv[13];
Jrel1 = sv[14];
Jrel2 = sv[15];
m = sv[16];
hf = sv[17];
hs = sv[18];
j = sv[19];
hsp = sv[20];
jp = sv[21];
mL = sv[22];
hL = sv[23];
hLp = sv[24];
a = sv[25];
i1 = sv[26];
i2 = sv[27];
d = sv[28];
ff = sv[29];
fs = sv[30];
fcaf = sv[31];
fcas = sv[32];
jca = sv[33];
ffp = sv[34];
fcafp = sv[35];
nca = sv[36];
b = sv[37];
g = sv[38];
xrf = sv[39];
xrs = sv[40];
xs1 = sv[41];
xs2 = sv[42];
y = sv[43];
xk1 = sv[44];
u = sv[45];
}
else
{
V = *((real *)((char *)sv + pitch * 0) + threadID_);
CaMKt = *((real *)((char *)sv + pitch * 1) + threadID_);
cass = *((real *)((char *)sv + pitch * 2) + threadID_);
nai = *((real *)((char *)sv + pitch * 3) + threadID_);
nasl = *((real *)((char *)sv + pitch * 4) + threadID_);
nass = *((real *)((char *)sv + pitch * 5) + threadID_);
ki = *((real *)((char *)sv + pitch * 6) + threadID_);
kss = *((real *)((char *)sv + pitch * 7) + threadID_);
ksl = *((real *)((char *)sv + pitch * 8) + threadID_);
cai = *((real *)((char *)sv + pitch * 9) + threadID_);
casl = *((real *)((char *)sv + pitch * 10) + threadID_);
cansr = *((real *)((char *)sv + pitch * 11) + threadID_);
cajsr = *((real *)((char *)sv + pitch * 12) + threadID_);
cacsr = *((real *)((char *)sv + pitch * 13) + threadID_);
Jrel1 = *((real *)((char *)sv + pitch * 14) + threadID_);
Jrel2 = *((real *)((char *)sv + pitch * 15) + threadID_);
m = *((real *)((char *)sv + pitch * 16) + threadID_);
hf = *((real *)((char *)sv + pitch * 17) + threadID_);
hs = *((real *)((char *)sv + pitch * 18) + threadID_);
j = *((real *)((char *)sv + pitch * 19) + threadID_);
hsp = *((real *)((char *)sv + pitch * 20) + threadID_);
jp = *((real *)((char *)sv + pitch * 21) + threadID_);
mL = *((real *)((char *)sv + pitch * 22) + threadID_);
hL = *((real *)((char *)sv + pitch * 23) + threadID_);
hLp = *((real *)((char *)sv + pitch * 24) + threadID_);
a = *((real *)((char *)sv + pitch * 25) + threadID_);
i1 = *((real *)((char *)sv + pitch * 26) + threadID_);
i2 = *((real *)((char *)sv + pitch * 27) + threadID_);
d = *((real *)((char *)sv + pitch * 28) + threadID_);
ff = *((real *)((char *)sv + pitch * 29) + threadID_);
fs = *((real *)((char *)sv + pitch * 30) + threadID_);
fcaf = *((real *)((char *)sv + pitch * 31) + threadID_);
fcas = *((real *)((char *)sv + pitch * 32) + threadID_);
jca = *((real *)((char *)sv + pitch * 33) + threadID_);
ffp = *((real *)((char *)sv + pitch * 34) + threadID_);
fcafp = *((real *)((char *)sv + pitch * 35) + threadID_);
nca = *((real *)((char *)sv + pitch * 36) + threadID_);
b = *((real *)((char *)sv + pitch * 37) + threadID_);
g = *((real *)((char *)sv + pitch * 38) + threadID_);
xrf = *((real *)((char *)sv + pitch * 39) + threadID_);
xrs = *((real *)((char *)sv + pitch * 40) + threadID_);
xs1 = *((real *)((char *)sv + pitch * 41) + threadID_);
xs2 = *((real *)((char *)sv + pitch * 42) + threadID_);
y = *((real *)((char *)sv + pitch * 43) + threadID_);
xk1 = *((real *)((char *)sv + pitch * 44) + threadID_);
u = *((real *)((char *)sv + pitch * 45) + threadID_);
}
#include "trovato_2019_common.inc"
}
//Include the default solver used by all models.
#include "../default_solvers.cu"
| 5e2403aabc331e1aaf8e5146709051811e38ea07.cu | #include "trovato_2019.h"
#include <stddef.h>
#include <stdint.h>
__global__ void kernel_set_model_initial_conditions(real *sv, int num_volumes, size_t pitch, bool use_adpt_dt, real min_dt) {
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes) {
real STATES[NEQ];
// Steady-state 40 pulses (BCL=1000ms)
STATES[0] = -86.7099;
STATES[1] = 0.005431;
STATES[2] = 0.000104;
STATES[3] = 8.25533;
STATES[4] = 8.25502;
STATES[5] = 8.25503;
STATES[6] = 143.743;
STATES[7] = 143.744;
STATES[8] = 143.744;
STATES[9] = 4.4e-05;
STATES[10] = 0.000103;
STATES[11] = 1.26947;
STATES[12] = 1.25254;
STATES[13] = 1.27103;
STATES[14] = 1.1e-05;
STATES[15] = 0;
STATES[16] = 0.006303;
STATES[17] = 0.789469;
STATES[18] = 0.789392;
STATES[19] = 0.791301;
STATES[20] = 0.580955;
STATES[21] = 0.791719;
STATES[22] = 0.000241;
STATES[23] = 0.463851;
STATES[24] = 0.239936;
STATES[25] = 0.000272;
STATES[26] = 0.646362;
STATES[27] = 0.98999;
STATES[28] = 0;
STATES[29] = 1;
STATES[30] = 0.926919;
STATES[31] = 1;
STATES[32] = 1;
STATES[33] = 0.999976;
STATES[34] = 1;
STATES[35] = 1;
STATES[36] = 0.005885;
STATES[37] = 0.000303;
STATES[38] = 0.994251;
STATES[39] = 0.000367;
STATES[40] = 0.566131;
STATES[41] = 0.189842;
STATES[42] = 0.000222;
STATES[43] = 0.233515;
STATES[44] = 0.997077;
STATES[45] = 0.471259;
for (int i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = STATES[i];
if(use_adpt_dt) {
*((real *)((char *)sv + pitch * NEQ) + threadID) = min_dt; // dt
*((real *)((char *)sv + pitch * (NEQ + 1)) + threadID) = 0.0; // time_new
*((real *)((char *)sv + pitch * (NEQ + 2)) + threadID) = 0.0; // previous dt
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt, size_t pitch, bool use_adpt_dt) {
// Get the stimulus current from the current cell
real calc_I_stim = stim_current;
// State variables
real V;
real CaMKt;
real cass;
real nai;
real nasl;
real nass;
real ki;
real kss;
real ksl;
real cai;
real casl;
real cansr;
real cajsr;
real cacsr;
real Jrel1;
real Jrel2;
real m;
real hf;
real hs;
real j;
real hsp;
real jp;
real mL;
real hL;
real hLp;
real a;
real i1;
real i2;
real d;
real ff;
real fs;
real fcaf;
real fcas;
real jca;
real ffp;
real fcafp;
real nca;
real b;
real g;
real xrf;
real xrs;
real xs1;
real xs2;
real y;
real xk1;
real u;
if (use_adpt_dt)
{
V = sv[0];
CaMKt = sv[1];
cass = sv[2];
nai = sv[3];
nasl = sv[4];
nass = sv[5];
ki = sv[6];
kss = sv[7];
ksl = sv[8];
cai = sv[9];
casl = sv[10];
cansr = sv[11];
cajsr = sv[12];
cacsr = sv[13];
Jrel1 = sv[14];
Jrel2 = sv[15];
m = sv[16];
hf = sv[17];
hs = sv[18];
j = sv[19];
hsp = sv[20];
jp = sv[21];
mL = sv[22];
hL = sv[23];
hLp = sv[24];
a = sv[25];
i1 = sv[26];
i2 = sv[27];
d = sv[28];
ff = sv[29];
fs = sv[30];
fcaf = sv[31];
fcas = sv[32];
jca = sv[33];
ffp = sv[34];
fcafp = sv[35];
nca = sv[36];
b = sv[37];
g = sv[38];
xrf = sv[39];
xrs = sv[40];
xs1 = sv[41];
xs2 = sv[42];
y = sv[43];
xk1 = sv[44];
u = sv[45];
}
else
{
V = *((real *)((char *)sv + pitch * 0) + threadID_);
CaMKt = *((real *)((char *)sv + pitch * 1) + threadID_);
cass = *((real *)((char *)sv + pitch * 2) + threadID_);
nai = *((real *)((char *)sv + pitch * 3) + threadID_);
nasl = *((real *)((char *)sv + pitch * 4) + threadID_);
nass = *((real *)((char *)sv + pitch * 5) + threadID_);
ki = *((real *)((char *)sv + pitch * 6) + threadID_);
kss = *((real *)((char *)sv + pitch * 7) + threadID_);
ksl = *((real *)((char *)sv + pitch * 8) + threadID_);
cai = *((real *)((char *)sv + pitch * 9) + threadID_);
casl = *((real *)((char *)sv + pitch * 10) + threadID_);
cansr = *((real *)((char *)sv + pitch * 11) + threadID_);
cajsr = *((real *)((char *)sv + pitch * 12) + threadID_);
cacsr = *((real *)((char *)sv + pitch * 13) + threadID_);
Jrel1 = *((real *)((char *)sv + pitch * 14) + threadID_);
Jrel2 = *((real *)((char *)sv + pitch * 15) + threadID_);
m = *((real *)((char *)sv + pitch * 16) + threadID_);
hf = *((real *)((char *)sv + pitch * 17) + threadID_);
hs = *((real *)((char *)sv + pitch * 18) + threadID_);
j = *((real *)((char *)sv + pitch * 19) + threadID_);
hsp = *((real *)((char *)sv + pitch * 20) + threadID_);
jp = *((real *)((char *)sv + pitch * 21) + threadID_);
mL = *((real *)((char *)sv + pitch * 22) + threadID_);
hL = *((real *)((char *)sv + pitch * 23) + threadID_);
hLp = *((real *)((char *)sv + pitch * 24) + threadID_);
a = *((real *)((char *)sv + pitch * 25) + threadID_);
i1 = *((real *)((char *)sv + pitch * 26) + threadID_);
i2 = *((real *)((char *)sv + pitch * 27) + threadID_);
d = *((real *)((char *)sv + pitch * 28) + threadID_);
ff = *((real *)((char *)sv + pitch * 29) + threadID_);
fs = *((real *)((char *)sv + pitch * 30) + threadID_);
fcaf = *((real *)((char *)sv + pitch * 31) + threadID_);
fcas = *((real *)((char *)sv + pitch * 32) + threadID_);
jca = *((real *)((char *)sv + pitch * 33) + threadID_);
ffp = *((real *)((char *)sv + pitch * 34) + threadID_);
fcafp = *((real *)((char *)sv + pitch * 35) + threadID_);
nca = *((real *)((char *)sv + pitch * 36) + threadID_);
b = *((real *)((char *)sv + pitch * 37) + threadID_);
g = *((real *)((char *)sv + pitch * 38) + threadID_);
xrf = *((real *)((char *)sv + pitch * 39) + threadID_);
xrs = *((real *)((char *)sv + pitch * 40) + threadID_);
xs1 = *((real *)((char *)sv + pitch * 41) + threadID_);
xs2 = *((real *)((char *)sv + pitch * 42) + threadID_);
y = *((real *)((char *)sv + pitch * 43) + threadID_);
xk1 = *((real *)((char *)sv + pitch * 44) + threadID_);
u = *((real *)((char *)sv + pitch * 45) + threadID_);
}
#include "trovato_2019_common.inc"
}
//Include the default solver used by all models.
#include "../default_solvers.cu"
|
50b27de05789061aea76d961abf2586493096aa7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <stdlib.h>
#define TRAINING (false)
#define PINNED_MEM 0
#define GROUP_GEMM 0
#define USE_GEMM_STREAMS 1
#define FUSE_PW 0
#define PRE_TRANSPOSE 0
#define RECUR_BATCH_SIZE 1
#define USE_LAYERS_STREAMS 0
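// Added summary of the switches above, matching the "optimization N" comments later in this
// file: GROUP_GEMM batches the four gate GEMMs into one (opt. 1), USE_GEMM_STREAMS puts the
// x- and h-GEMMs on separate streams (opt. 2), FUSE_PW replaces the chain of pointwise
// kernels with LSTM_unit_fused (opt. 3), PRE_TRANSPOSE transposes the weights once up front
// (opt. 4), RECUR_BATCH_SIZE > 1 batches timesteps per layer visit (opt. 5), and
// USE_LAYERS_STREAMS gives each layer its own stream pair (opt. 6). PINNED_MEM is defined
// but not referenced in this file.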
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
// Pointwise functions
__global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] += bias[i % nBias];
}
__global__ void pw_vecAdd(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] + b[i];
}
__global__ void pw_vecMul(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] * b[i];
}
__global__ void pw_tanh(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = tanh(a[i]);
}
__global__ void pw_sigmoid(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = sigmoidf(a[i]);
}
// Unfused LSTM (calling many pointwise kernels).
int LSTM_unit_unfused(int hiddenSize,
int miniBatch,
float * __restrict__ h_in, // h(t-1) * R
float * __restrict__ x_in, // x(t) * W
float * __restrict__ bias,
float * __restrict__ h_out,// h(t)
float * __restrict__ x_out,
float * __restrict__ c_in, // c(t-1)
float * __restrict__ c_out,// c(t)
hipStream_t stream) {
dim3 blockDim, gridDim;
int numElements = hiddenSize * miniBatch;
blockDim.x = 128;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
// element wise calculations
// x(t) = x(t) * W + h(t-1) * R + bias, as input to this unit
for (int i = 0; i < 4; i++) {
hipLaunchKernelGGL(( pw_vecAdd) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + i * numElements,
x_in + i * numElements,
h_in + i * numElements,
numElements);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_biasAdd) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + i * numElements,
bias + i * hiddenSize,
numElements,
hiddenSize);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_biasAdd) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + i * numElements,
bias + (i + 4) * hiddenSize,
numElements,
hiddenSize);
cudaErrCheck(hipGetLastError());
}
// x(t) goes through 4 gates' activation
hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + 0 * numElements, x_in + 0 * numElements, numElements);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + 1 * numElements, x_in + 1 * numElements, numElements);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_tanh) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + 2 * numElements, x_in + 2 * numElements, numElements);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , x_in + 3 * numElements, x_in + 3 * numElements, numElements);
cudaErrCheck(hipGetLastError());
// assign location to 4 gates
float *in_gate = x_in + 0 * numElements;
float *forget_gate = x_in + 1 * numElements;
float *in_gate2 = x_in + 2 * numElements;
float *out_gate = x_in + 3 * numElements;
// f(t) *= c(t-1)
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , forget_gate, forget_gate, c_in, numElements);
cudaErrCheck(hipGetLastError());
// i(t) *= g(t)
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, in_gate2, numElements);
cudaErrCheck(hipGetLastError());
// i(t) += f(t)
hipLaunchKernelGGL(( pw_vecAdd) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, forget_gate, numElements);
cudaErrCheck(hipGetLastError());
// c(t) = i(t), output cell state
cudaErrCheck(hipMemcpyAsync(c_out, in_gate, numElements * sizeof(float), hipMemcpyDeviceToDevice, stream));
// i(t) = tanh(i(t)), i(t) === c(t) here, but we must not modify c(t)
hipLaunchKernelGGL(( pw_tanh) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, numElements);
cudaErrCheck(hipGetLastError());
// h(t) = i(t) * o(t)
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , h_out, out_gate, in_gate, numElements);
cudaErrCheck(hipGetLastError());
cudaErrCheck(hipMemcpyAsync(x_out, h_out, numElements * sizeof(float), hipMemcpyDeviceToDevice, stream));
return 0;
}
__global__ void LSTM_unit_fused(int hiddenSize,
int miniBatch,
float * __restrict__ h_in,
float * __restrict__ x_in,
float * __restrict__ bias,
float * __restrict__ linearGates,
float * __restrict__ h_out,
float * __restrict__ x_out,
float * __restrict__ c_in,
float * __restrict__ c_out,
bool training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int numElements = miniBatch * hiddenSize;
if (index >= numElements) return;
int currentBatch = index / hiddenSize;
int gateIndex = (index % hiddenSize) + 4 * currentBatch * hiddenSize;
float gate[4];
for (int i = 0; i < 4; i++) {
gate[i] = x_in[i * hiddenSize + gateIndex] + h_in[i * hiddenSize + gateIndex];
gate[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + index % hiddenSize];
if (training) linearGates[gateIndex + i * hiddenSize] = gate[i];
}
float in_gate = sigmoidf(gate[0]);
float forget_gate = sigmoidf(gate[1]);
float in_gate2 = tanhf(gate[2]);
float out_gate = sigmoidf(gate[3]);
float value = (c_in[index] * forget_gate) + (in_gate * in_gate2);
c_out[index] = value;
value = out_gate * tanhf(value);
h_out[index] = value;
x_out[index] = value;
}
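// Added recap (standard LSTM cell equations, matching the arithmetic above): with the four
// pre-activations already holding x(t)*W + h(t-1)*R plus both bias vectors,
//   i = sigmoid(gate[0]), f = sigmoid(gate[1]), g = tanh(gate[2]), o = sigmoid(gate[3]),
//   c(t) = f * c(t-1) + i * g,  h(t) = o * tanh(c(t)),
// and h(t) is also written to x_out as the input of the next layer.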
float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, bool checkF) {
int numElements = hiddenSize * miniBatch;
// alloc device memory
float *h_data, *x_data, *c_data;
cudaErrCheck(hipMalloc((void**)&h_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&x_data, (seqLength) * (numLayers + 1) * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
float *weight, *weight_T;
cudaErrCheck(hipMalloc((void**)&weight, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&weight_T, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float)));
float *bias;
cudaErrCheck(hipMalloc((void**)&bias, numLayers * hiddenSize * 8 * sizeof(float)));
float *h_in, *x_in;
cudaErrCheck(hipMalloc((void**)&h_in, 4 * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&x_in, 4 * seqLength * numElements * sizeof(float)));
float *linearGates;
// Activations
if (TRAINING) {
cudaErrCheck(hipMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float)));
}
// operation wise streams for optimization 2
hipStream_t stream_x_single, stream_h_single;
// (operation + layer) wise streams for optimization 6
hipStream_t *stream_x, *stream_h;
stream_x = (hipStream_t*)malloc(numLayers * sizeof(hipStream_t));
stream_h = (hipStream_t*)malloc(numLayers * sizeof(hipStream_t));
if (USE_GEMM_STREAMS) {
// optimization 2 uses different streams for x and h
cudaErrCheck(hipStreamCreate(&stream_x_single));
cudaErrCheck(hipStreamCreateWithPriority(&stream_h_single, 0, -1));
}
else {
for (int i = 0; i < numLayers; i++) {
if (USE_LAYERS_STREAMS) {
// optimization 6 uses different streams for various layers
cudaErrCheck(hipStreamCreate(&stream_x[i]));
cudaErrCheck(hipStreamCreateWithPriority(&stream_h[i], 0, -1));
}
else {
stream_x[i] = NULL;
stream_h[i] = NULL;
}
}
}
// alloc events
hipEvent_t **events_x, **events_h;
events_x = (hipEvent_t**)malloc(numLayers * sizeof(hipEvent_t*));
events_h = (hipEvent_t**)malloc(numLayers * sizeof(hipEvent_t*));
for (int i = 0; i < numLayers; i++) {
events_x[i] = (hipEvent_t*)malloc(seqLength * sizeof(hipEvent_t));
events_h[i] = (hipEvent_t*)malloc(seqLength * sizeof(hipEvent_t));
}
// initiate random inputs
hiprandGenerator_t gen;
curandErrCheck(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(gen, 1782ULL));
//cudaErrCheck(hipMemset(h_data, 0, (numLayers) * (seqLength + 1) * numElements * sizeof(float)));
//cudaErrCheck(hipMemset(c_data, 0, (numLayers) * (seqLength + 1) * numElements * sizeof(float)));
curandErrCheck(hiprandGenerateUniform(gen, h_data, (seqLength + 1) * (numLayers) * numElements));
curandErrCheck(hiprandGenerateUniform(gen, c_data, (seqLength + 1) * (numLayers) * numElements));
curandErrCheck(hiprandGenerateUniform(gen, x_data, (seqLength) * (numLayers + 1) * numElements));
curandErrCheck(hiprandGenerateUniform(gen, weight, numLayers * hiddenSize * hiddenSize * 8));
curandErrCheck(hiprandGenerateUniform(gen, bias, numLayers * hiddenSize * 8));
curandErrCheck(hiprandDestroyGenerator(gen));
// create cuBLAS handle.
hipblasHandle_t handle;
cublasErrCheck(hipblasCreate(&handle));
cudaErrCheck(hipDeviceSynchronize());
// start timing
float elapsedTime;
hipEvent_t start, stop;
cudaErrCheck(hipEventCreate(&start));
cudaErrCheck(hipEventCreate(&stop));
cudaErrCheck(hipEventRecord(start));
// LSTM
const hipblasOperation_t a_trans = (PRE_TRANSPOSE && (seqLength > 1)) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t b_trans = HIPBLAS_OP_N; // always N
    // hipblasSgemm(): C = alpha * op(A) * op(B) + beta * C
float alpha = 1.f;
float beta = 0.f;
if (a_trans == HIPBLAS_OP_N) {
// do optimization 4 here, transpose A
for (int layer = 0; layer <numLayers; layer++) {
// determine whether using same streams among layers
hipStream_t stream_x_this_layer, stream_h_this_layer;
if (USE_GEMM_STREAMS) {
stream_x_this_layer = stream_x_single;
stream_h_this_layer = stream_h_single;
}
else {
stream_x_this_layer = stream_x[layer];
stream_h_this_layer = stream_h[layer];
}
// for x(t)
float *W_weight_in = weight + layer * hiddenSize * hiddenSize * 8;
float *W_weight_out = weight_T + layer * hiddenSize * hiddenSize * 8;
// for h(t-1)
float *R_weight_in = weight + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
float *R_weight_out = weight_T + layer *hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
cublasErrCheck(hipblasSetStream(handle, stream_x_this_layer));
cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, // trans A
HIPBLAS_OP_N, // trans B
4 * hiddenSize, // #rows in A & C
hiddenSize, // #cols in B & C
&alpha, // scale A
W_weight_in, // A
hiddenSize, // leading dim in A
&beta, // scale B
NULL, // B
4 * hiddenSize, // leading dim in B
W_weight_out, // C
4 * hiddenSize)); // leading dim in C
cublasErrCheck(hipblasSetStream(handle, stream_h_this_layer));
cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, // trans A
HIPBLAS_OP_N, // trans B
4 * hiddenSize, // #rows in A & C
hiddenSize, // #cols in B & C
&alpha, // scale A
R_weight_in, // A
hiddenSize, // leading dim in A
&beta, // scale B
NULL, // B
4 * hiddenSize, // leading dim in B
R_weight_out, // C
4 * hiddenSize)); // leading dim in C
}
}
else {
weight_T = weight;
}
int lStart = 0; // layer starts from
int lEnd = 0; // layer ends at
int tStart = 0; // timestep starts from
int tEnd = 0; // timestep ends at
int recurBatchSize = RECUR_BATCH_SIZE; // optimization 5 will make it 2
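    // Added trace of the scheduling loop below (hand-checked for numLayers=2, seqLength=3,
    // recurBatchSize=1): the (layer, timestep) units are visited in anti-diagonal order
    // (0,0), (0,1), (1,0), (0,2), (1,1), (1,2), i.e. a wavefront that lets layer l start on
    // timestep t as soon as layer l-1 has produced h for that timestep; with recurBatchSize=2
    // each visit covers two consecutive timesteps instead of one.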
while (true) {
// Many layer "scheduling".
if (lEnd == 0) {
lStart = 0;
lEnd = 1;
tStart = 0;
}
else {
// Move "up" and "left"
lStart++;
lEnd++;
tStart -= recurBatchSize;
// Over the top or off the left, reset to layer 0
if (lEnd > numLayers || tStart < 0) {
tStart += (lStart + 1) * recurBatchSize;
lStart = 0;
lEnd = 1;
}
// Off the right, step up
while (tStart >= seqLength && lEnd <= numLayers) {
lStart++;
lEnd++;
tStart -= recurBatchSize;
}
// Over the top or off the left, done!
if (lEnd > numLayers || tStart < 0) {
break;
}
}
tEnd = tStart + recurBatchSize;
if (tEnd > seqLength) tEnd = seqLength;
// lStart, lEnd always differ 1
for (int layer = lStart; layer < lEnd; layer++) {
// determine whether using same streams among layers
hipStream_t stream_x_this_layer, stream_h_this_layer;
if (USE_GEMM_STREAMS) {
stream_x_this_layer = stream_x_single;
stream_h_this_layer = stream_h_single;
}
else {
stream_x_this_layer = stream_x[layer];
stream_h_this_layer = stream_h[layer];
}
// do x(t) * W_weight on stream_x[layer]
cublasErrCheck(hipblasSetStream(handle, stream_x_this_layer));
// tStart, tEnd differ recurBatchSize
for (int i = tStart; i < tEnd; i++) {
if (layer > 0) {
cudaErrCheck(hipStreamWaitEvent(stream_x_this_layer, events_h[layer - 1][i], 0));
cudaErrCheck(hipEventDestroy(events_h[layer - 1][i]));
}
}
// x(t) *= [W_weight]
if (GROUP_GEMM) {
// do optimization 1 here
cublasErrCheck(hipblasSgemm(handle,
a_trans, b_trans,
4 * hiddenSize, // #rows of A and C
miniBatch * (tEnd - tStart), // #cols of B and C
hiddenSize, // #cols of A and B
&alpha,
&weight_T[layer * 8 * hiddenSize * hiddenSize], // A
a_trans == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize, // leading dimension of A, where we can try different data layout
x_data + tStart * numElements + layer * seqLength * numElements, // B
hiddenSize, // leading dimension of B, where we can try different data layout
&beta,
x_in + 4 * tStart * numElements, // C
4 * hiddenSize // leading dimension of C
));
}
else {
for (int igemm =0; igemm < 4; igemm++) {
cublasErrCheck(hipblasSgemm(handle,
a_trans, b_trans,
hiddenSize, // #rows of A and C
miniBatch * (tEnd - tStart), // #cols of B and C
hiddenSize, // #cols of A and B
&alpha,
&weight_T[layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], // A
a_trans == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize, // leading dimension of A, where we can try different data layout
x_data + tStart * numElements + layer * seqLength * numElements, // B
hiddenSize, // leading dimension of B, where we can try different data layout
&beta,
x_in + 4 * tStart * numElements + igemm * hiddenSize, // C
4 * hiddenSize // leading dimension of C
));
}
}
for (int i = tStart; i < tEnd; i++) {
            cudaErrCheck(hipEventCreateWithFlags(&events_x[layer][i], hipEventDisableTiming)); // hipEventCreate has no flags overload; use the WithFlags variant
cudaErrCheck(hipEventRecord(events_x[layer][i], stream_x_this_layer));
}
for (int i = tStart; i < tEnd; i++) {
// do h(t-1) *= [R_weight] on stream_h[layer]
cublasErrCheck(hipblasSetStream(handle, stream_h_this_layer));
// h(t-1) *= [R_weight]
if (GROUP_GEMM) {
// do optimization 1 here
cublasErrCheck(hipblasSgemm(handle,
a_trans, b_trans,
4 * hiddenSize, miniBatch, hiddenSize,
&alpha,
&weight_T[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize],
a_trans == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize,
h_data + i * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&beta,
h_in + 4 * layer * numElements,
4 * hiddenSize));
}
else {
for (int igemm =0; igemm < 4; igemm++) {
cublasErrCheck(hipblasSgemm(handle,
a_trans, b_trans,
hiddenSize, miniBatch, hiddenSize,
&alpha,
&weight_T[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize],
a_trans == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize,
h_data + i * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&beta,
h_in + 4 * layer * numElements + igemm * hiddenSize,
4 * hiddenSize));
}
}
cudaErrCheck(hipStreamWaitEvent(stream_h_this_layer, events_x[layer][i], 0));
cudaErrCheck(hipEventDestroy(events_x[layer][i]));
if (FUSE_PW) {
// optimization 3
dim3 blockDim, gridDim;
blockDim.x = 256;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( LSTM_unit_fused) , dim3(gridDim), dim3(blockDim), 0, stream_h_this_layer ,
hiddenSize, miniBatch,
h_in + 4 * layer * numElements,
x_in + 4 * i * numElements,
bias + 8 * layer * hiddenSize,
TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL,
h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
x_data + i * numElements + (layer + 1) * seqLength * numElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
TRAINING);
cudaErrCheck(hipGetLastError());
}
else {
LSTM_unit_unfused(hiddenSize, miniBatch,
h_in + 4 * layer * numElements,
x_in + 4 * i * numElements,
bias + 8 * layer * hiddenSize,
h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
x_data + i * numElements + (layer + 1) * seqLength * numElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
stream_h_this_layer);
}
if (layer != numLayers - 1) {
                cudaErrCheck(hipEventCreateWithFlags(&events_h[layer][i], hipEventDisableTiming)); // hipEventCreate has no flags overload; use the WithFlags variant
cudaErrCheck(hipEventRecord(events_h[layer][i], stream_h_this_layer));
}
}
}
}
// stop timing
cudaErrCheck(hipEventRecord(stop));
cudaErrCheck(hipEventSynchronize(stop));
cudaErrCheck(hipEventElapsedTime(&elapsedTime, start, stop));
cudaErrCheck(hipDeviceSynchronize());
// free everything
cudaErrCheck(hipFree(h_data));
cudaErrCheck(hipFree(x_data));
cudaErrCheck(hipFree(c_data));
if (weight != weight_T) cudaErrCheck(hipFree(weight));
cudaErrCheck(hipFree(weight_T));
cudaErrCheck(hipFree(bias));
cudaErrCheck(hipFree(h_in));
cudaErrCheck(hipFree(x_in));
if (TRAINING) cudaErrCheck(hipFree(linearGates));
if (USE_GEMM_STREAMS) {
cudaErrCheck(hipStreamDestroy(stream_x_single));
cudaErrCheck(hipStreamDestroy(stream_h_single));
}
else {
for (int i = 0; i < numLayers; i++) {
if (stream_x[i] != NULL) cudaErrCheck(hipStreamDestroy(stream_x[i]));
if (stream_h[i] != NULL) cudaErrCheck(hipStreamDestroy(stream_h[i]));
}
}
free(stream_x);
free(stream_h);
for (int i = 0; i < numLayers; i++) {
free(events_x[i]);
free(events_h[i]);
}
free(events_x);
free(events_h);
return elapsedTime;
}
int main(int argc, char* argv[]) {
int seqLength;
int numLayers;
int hiddenSize;
int miniBatch;
if (argc == 5) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
miniBatch = atoi(argv[4]);
}
else if (argc == 1) {
printf("Running with default settings\n");
seqLength = 100;
numLayers = 4;
hiddenSize = 512;
miniBatch = 64;
}
else {
printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>\n");
return 1;
}
printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch);
int numRuns = 1;
float totalTime = 0.f;
for (int run = 0; run < numRuns; run++) {
totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, true);
}
printf("Runtime %f ms\n", totalTime / numRuns);
    return 0;
}
| 50b27de05789061aea76d961abf2586493096aa7.cu | #include <stdio.h>
#include <cublas_v2.h>
#include <curand.h>
#include <stdlib.h>
#define TRAINING (false)
#define PINNED_MEM 0
#define GROUP_GEMM 0
#define USE_GEMM_STREAMS 1
#define FUSE_PW 0
#define PRE_TRANSPOSE 0
#define RECUR_BATCH_SIZE 1
#define USE_LAYERS_STREAMS 0
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
// Pointwise functions
__global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] += bias[i % nBias];
}
__global__ void pw_vecAdd(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] + b[i];
}
__global__ void pw_vecMul(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] * b[i];
}
__global__ void pw_tanh(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = tanh(a[i]);
}
__global__ void pw_sigmoid(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = sigmoidf(a[i]);
}
// Unfused LSTM (calling many pointwise kernels).
int LSTM_unit_unfused(int hiddenSize,
int miniBatch,
float * __restrict__ h_in, // h(t-1) * R
float * __restrict__ x_in, // x(t) * W
float * __restrict__ bias,
float * __restrict__ h_out,// h(t)
float * __restrict__ x_out,
float * __restrict__ c_in, // c(t-1)
float * __restrict__ c_out,// c(t)
cudaStream_t stream) {
dim3 blockDim, gridDim;
int numElements = hiddenSize * miniBatch;
blockDim.x = 128;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
// element wise calculations
// x(t) = x(t) * W + h(t-1) * R + bias, as input to this unit
for (int i = 0; i < 4; i++) {
pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (x_in + i * numElements,
x_in + i * numElements,
h_in + i * numElements,
numElements);
cudaErrCheck(cudaGetLastError());
pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (x_in + i * numElements,
bias + i * hiddenSize,
numElements,
hiddenSize);
cudaErrCheck(cudaGetLastError());
pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (x_in + i * numElements,
bias + (i + 4) * hiddenSize,
numElements,
hiddenSize);
cudaErrCheck(cudaGetLastError());
}
// x(t) goes through 4 gates' activation
pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (x_in + 0 * numElements, x_in + 0 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (x_in + 1 * numElements, x_in + 1 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
pw_tanh <<< gridDim, blockDim, 0, stream >>> (x_in + 2 * numElements, x_in + 2 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (x_in + 3 * numElements, x_in + 3 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
// assign location to 4 gates
float *in_gate = x_in + 0 * numElements;
float *forget_gate = x_in + 1 * numElements;
float *in_gate2 = x_in + 2 * numElements;
float *out_gate = x_in + 3 * numElements;
// f(t) *= c(t-1)
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (forget_gate, forget_gate, c_in, numElements);
cudaErrCheck(cudaGetLastError());
// i(t) *= g(t)
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements);
cudaErrCheck(cudaGetLastError());
// i(t) += f(t)
pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, forget_gate, numElements);
cudaErrCheck(cudaGetLastError());
// c(t) = i(t), output cell state
cudaErrCheck(cudaMemcpyAsync(c_out, in_gate, numElements * sizeof(float), cudaMemcpyDeviceToDevice, stream));
// i(t) = tanh(i(t)), i(t) === c(t) here, but we must not modify c(t)
pw_tanh <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, numElements);
cudaErrCheck(cudaGetLastError());
// h(t) = i(t) * o(t)
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (h_out, out_gate, in_gate, numElements);
cudaErrCheck(cudaGetLastError());
cudaErrCheck(cudaMemcpyAsync(x_out, h_out, numElements * sizeof(float), cudaMemcpyDeviceToDevice, stream));
return 0;
}
__global__ void LSTM_unit_fused(int hiddenSize,
int miniBatch,
float * __restrict__ h_in,
float * __restrict__ x_in,
float * __restrict__ bias,
float * __restrict__ linearGates,
float * __restrict__ h_out,
float * __restrict__ x_out,
float * __restrict__ c_in,
float * __restrict__ c_out,
bool training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int numElements = miniBatch * hiddenSize;
if (index >= numElements) return;
int currentBatch = index / hiddenSize;
int gateIndex = (index % hiddenSize) + 4 * currentBatch * hiddenSize;
float gate[4];
for (int i = 0; i < 4; i++) {
gate[i] = x_in[i * hiddenSize + gateIndex] + h_in[i * hiddenSize + gateIndex];
gate[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + index % hiddenSize];
if (training) linearGates[gateIndex + i * hiddenSize] = gate[i];
}
float in_gate = sigmoidf(gate[0]);
float forget_gate = sigmoidf(gate[1]);
float in_gate2 = tanhf(gate[2]);
float out_gate = sigmoidf(gate[3]);
float value = (c_in[index] * forget_gate) + (in_gate * in_gate2);
c_out[index] = value;
value = out_gate * tanhf(value);
h_out[index] = value;
x_out[index] = value;
}
float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, bool checkF) {
int numElements = hiddenSize * miniBatch;
// alloc device memory
float *h_data, *x_data, *c_data;
cudaErrCheck(cudaMalloc((void**)&h_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&x_data, (seqLength) * (numLayers + 1) * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
float *weight, *weight_T;
cudaErrCheck(cudaMalloc((void**)&weight, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&weight_T, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float)));
float *bias;
cudaErrCheck(cudaMalloc((void**)&bias, numLayers * hiddenSize * 8 * sizeof(float)));
float *h_in, *x_in;
cudaErrCheck(cudaMalloc((void**)&h_in, 4 * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&x_in, 4 * seqLength * numElements * sizeof(float)));
float *linearGates;
// Activations
if (TRAINING) {
cudaErrCheck(cudaMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float)));
}
// operation wise streams for optimization 2
cudaStream_t stream_x_single, stream_h_single;
// (operation + layer) wise streams for optimization 6
cudaStream_t *stream_x, *stream_h;
stream_x = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t));
stream_h = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t));
if (USE_GEMM_STREAMS) {
// optimization 2 uses different streams for x and h
cudaErrCheck(cudaStreamCreate(&stream_x_single));
cudaErrCheck(cudaStreamCreateWithPriority(&stream_h_single, 0, -1));
}
else {
for (int i = 0; i < numLayers; i++) {
if (USE_LAYERS_STREAMS) {
// optimization 6 uses different streams for various layers
cudaErrCheck(cudaStreamCreate(&stream_x[i]));
cudaErrCheck(cudaStreamCreateWithPriority(&stream_h[i], 0, -1));
}
else {
stream_x[i] = NULL;
stream_h[i] = NULL;
}
}
}
// alloc events
cudaEvent_t **events_x, **events_h;
events_x = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*));
events_h = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*));
for (int i = 0; i < numLayers; i++) {
events_x[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t));
events_h[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t));
}
// initiate random inputs
curandGenerator_t gen;
curandErrCheck(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(curandSetPseudoRandomGeneratorSeed(gen, 1782ULL));
//cudaErrCheck(cudaMemset(h_data, 0, (numLayers) * (seqLength + 1) * numElements * sizeof(float)));
//cudaErrCheck(cudaMemset(c_data, 0, (numLayers) * (seqLength + 1) * numElements * sizeof(float)));
curandErrCheck(curandGenerateUniform(gen, h_data, (seqLength + 1) * (numLayers) * numElements));
curandErrCheck(curandGenerateUniform(gen, c_data, (seqLength + 1) * (numLayers) * numElements));
curandErrCheck(curandGenerateUniform(gen, x_data, (seqLength) * (numLayers + 1) * numElements));
curandErrCheck(curandGenerateUniform(gen, weight, numLayers * hiddenSize * hiddenSize * 8));
curandErrCheck(curandGenerateUniform(gen, bias, numLayers * hiddenSize * 8));
curandErrCheck(curandDestroyGenerator(gen));
// create cuBLAS handle.
cublasHandle_t handle;
cublasErrCheck(cublasCreate(&handle));
cudaErrCheck(cudaDeviceSynchronize());
// start timing
float elapsedTime;
cudaEvent_t start, stop;
cudaErrCheck(cudaEventCreate(&start));
cudaErrCheck(cudaEventCreate(&stop));
cudaErrCheck(cudaEventRecord(start));
// LSTM
const cublasOperation_t a_trans = (PRE_TRANSPOSE && (seqLength > 1)) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t b_trans = CUBLAS_OP_N; // always N
    // cublasSgemm(): C = alpha * op(A) * op(B) + beta * C
float alpha = 1.f;
float beta = 0.f;
if (a_trans == CUBLAS_OP_N) {
// do optimization 4 here, transpose A
for (int layer = 0; layer <numLayers; layer++) {
// determine whether using same streams among layers
cudaStream_t stream_x_this_layer, stream_h_this_layer;
if (USE_GEMM_STREAMS) {
stream_x_this_layer = stream_x_single;
stream_h_this_layer = stream_h_single;
}
else {
stream_x_this_layer = stream_x[layer];
stream_h_this_layer = stream_h[layer];
}
// for x(t)
float *W_weight_in = weight + layer * hiddenSize * hiddenSize * 8;
float *W_weight_out = weight_T + layer * hiddenSize * hiddenSize * 8;
// for h(t-1)
float *R_weight_in = weight + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
float *R_weight_out = weight_T + layer *hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
cublasErrCheck(cublasSetStream(handle, stream_x_this_layer));
cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, // trans A
CUBLAS_OP_N, // trans B
4 * hiddenSize, // #rows in A & C
hiddenSize, // #cols in B & C
&alpha, // scale A
W_weight_in, // A
hiddenSize, // leading dim in A
&beta, // scale B
NULL, // B
4 * hiddenSize, // leading dim in B
W_weight_out, // C
4 * hiddenSize)); // leading dim in C
cublasErrCheck(cublasSetStream(handle, stream_h_this_layer));
cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, // trans A
CUBLAS_OP_N, // trans B
4 * hiddenSize, // #rows in A & C
hiddenSize, // #cols in B & C
&alpha, // scale A
R_weight_in, // A
hiddenSize, // leading dim in A
&beta, // scale B
NULL, // B
4 * hiddenSize, // leading dim in B
R_weight_out, // C
4 * hiddenSize)); // leading dim in C
}
}
else {
weight_T = weight;
}
int lStart = 0; // layer starts from
int lEnd = 0; // layer ends at
int tStart = 0; // timestep starts from
int tEnd = 0; // timestep ends at
int recurBatchSize = RECUR_BATCH_SIZE; // optimization 5 will make it 2
while (true) {
// Many layer "scheduling".
if (lEnd == 0) {
lStart = 0;
lEnd = 1;
tStart = 0;
}
else {
// Move "up" and "left"
lStart++;
lEnd++;
tStart -= recurBatchSize;
// Over the top or off the left, reset to layer 0
if (lEnd > numLayers || tStart < 0) {
tStart += (lStart + 1) * recurBatchSize;
lStart = 0;
lEnd = 1;
}
// Off the right, step up
while (tStart >= seqLength && lEnd <= numLayers) {
lStart++;
lEnd++;
tStart -= recurBatchSize;
}
// Over the top or off the left, done!
if (lEnd > numLayers || tStart < 0) {
break;
}
}
tEnd = tStart + recurBatchSize;
if (tEnd > seqLength) tEnd = seqLength;
// lStart, lEnd always differ 1
for (int layer = lStart; layer < lEnd; layer++) {
// determine whether using same streams among layers
cudaStream_t stream_x_this_layer, stream_h_this_layer;
if (USE_GEMM_STREAMS) {
stream_x_this_layer = stream_x_single;
stream_h_this_layer = stream_h_single;
}
else {
stream_x_this_layer = stream_x[layer];
stream_h_this_layer = stream_h[layer];
}
// do x(t) * W_weight on stream_x[layer]
cublasErrCheck(cublasSetStream(handle, stream_x_this_layer));
// tStart, tEnd differ recurBatchSize
for (int i = tStart; i < tEnd; i++) {
if (layer > 0) {
cudaErrCheck(cudaStreamWaitEvent(stream_x_this_layer, events_h[layer - 1][i], 0));
cudaErrCheck(cudaEventDestroy(events_h[layer - 1][i]));
}
}
// x(t) *= [W_weight]
if (GROUP_GEMM) {
// do optimization 1 here
cublasErrCheck(cublasSgemm(handle,
a_trans, b_trans,
4 * hiddenSize, // #rows of A and C
miniBatch * (tEnd - tStart), // #cols of B and C
hiddenSize, // #cols of A and B
&alpha,
&weight_T[layer * 8 * hiddenSize * hiddenSize], // A
a_trans == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, // leading dimension of A, where we can try different data layout
x_data + tStart * numElements + layer * seqLength * numElements, // B
hiddenSize, // leading dimension of B, where we can try different data layout
&beta,
x_in + 4 * tStart * numElements, // C
4 * hiddenSize // leading dimension of C
));
}
else {
for (int igemm =0; igemm < 4; igemm++) {
cublasErrCheck(cublasSgemm(handle,
a_trans, b_trans,
hiddenSize, // #rows of A and C
miniBatch * (tEnd - tStart), // #cols of B and C
hiddenSize, // #cols of A and B
&alpha,
&weight_T[layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], // A
a_trans == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, // leading dimension of A, where we can try different data layout
x_data + tStart * numElements + layer * seqLength * numElements, // B
hiddenSize, // leading dimension of B, where we can try different data layout
&beta,
x_in + 4 * tStart * numElements + igemm * hiddenSize, // C
4 * hiddenSize // leading dimension of C
));
}
}
for (int i = tStart; i < tEnd; i++) {
cudaErrCheck(cudaEventCreate(&events_x[layer][i], cudaEventDisableTiming));
cudaErrCheck(cudaEventRecord(events_x[layer][i], stream_x_this_layer));
}
for (int i = tStart; i < tEnd; i++) {
// do h(t-1) *= [R_weight] on stream_h[layer]
cublasErrCheck(cublasSetStream(handle, stream_h_this_layer));
// h(t-1) *= [R_weight]
if (GROUP_GEMM) {
// do optimization 1 here
cublasErrCheck(cublasSgemm(handle,
a_trans, b_trans,
4 * hiddenSize, miniBatch, hiddenSize,
&alpha,
&weight_T[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize],
a_trans == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize,
h_data + i * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&beta,
h_in + 4 * layer * numElements,
4 * hiddenSize));
}
else {
for (int igemm =0; igemm < 4; igemm++) {
cublasErrCheck(cublasSgemm(handle,
a_trans, b_trans,
hiddenSize, miniBatch, hiddenSize,
&alpha,
&weight_T[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize],
a_trans == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize,
h_data + i * numElements + layer * (seqLength + 1) * numElements,
hiddenSize,
&beta,
h_in + 4 * layer * numElements + igemm * hiddenSize,
4 * hiddenSize));
}
}
cudaErrCheck(cudaStreamWaitEvent(stream_h_this_layer, events_x[layer][i], 0));
cudaErrCheck(cudaEventDestroy(events_x[layer][i]));
if (FUSE_PW) {
// optimization 3
dim3 blockDim, gridDim;
blockDim.x = 256;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
LSTM_unit_fused <<< gridDim, blockDim, 0, stream_h_this_layer >>>
(hiddenSize, miniBatch,
h_in + 4 * layer * numElements,
x_in + 4 * i * numElements,
bias + 8 * layer * hiddenSize,
TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL,
h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
x_data + i * numElements + (layer + 1) * seqLength * numElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
TRAINING);
cudaErrCheck(cudaGetLastError());
}
else {
LSTM_unit_unfused(hiddenSize, miniBatch,
h_in + 4 * layer * numElements,
x_in + 4 * i * numElements,
bias + 8 * layer * hiddenSize,
h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
x_data + i * numElements + (layer + 1) * seqLength * numElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
stream_h_this_layer);
}
if (layer != numLayers - 1) {
cudaErrCheck(cudaEventCreate(&events_h[layer][i], cudaEventDisableTiming));
cudaErrCheck(cudaEventRecord(events_h[layer][i], stream_h_this_layer));
}
}
}
}
// stop timing
cudaErrCheck(cudaEventRecord(stop));
cudaErrCheck(cudaEventSynchronize(stop));
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, start, stop));
cudaErrCheck(cudaDeviceSynchronize());
// free everything
cudaErrCheck(cudaFree(h_data));
cudaErrCheck(cudaFree(x_data));
cudaErrCheck(cudaFree(c_data));
if (weight != weight_T) cudaErrCheck(cudaFree(weight));
cudaErrCheck(cudaFree(weight_T));
cudaErrCheck(cudaFree(bias));
cudaErrCheck(cudaFree(h_in));
cudaErrCheck(cudaFree(x_in));
if (TRAINING) cudaErrCheck(cudaFree(linearGates));
if (USE_GEMM_STREAMS) {
cudaErrCheck(cudaStreamDestroy(stream_x_single));
cudaErrCheck(cudaStreamDestroy(stream_h_single));
}
else {
for (int i = 0; i < numLayers; i++) {
if (stream_x[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_x[i]));
if (stream_h[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_h[i]));
}
}
free(stream_x);
free(stream_h);
for (int i = 0; i < numLayers; i++) {
free(events_x[i]);
free(events_h[i]);
}
free(events_x);
free(events_h);
return elapsedTime;
}
int main(int argc, char* argv[]) {
int seqLength;
int numLayers;
int hiddenSize;
int miniBatch;
if (argc == 5) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
miniBatch = atoi(argv[4]);
}
else if (argc == 1) {
printf("Running with default settings\n");
seqLength = 100;
numLayers = 4;
hiddenSize = 512;
miniBatch = 64;
}
else {
printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>\n");
return 1;
}
printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch);
int numRuns = 1;
float totalTime = 0.f;
for (int run = 0; run < numRuns; run++) {
totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, true);
}
printf("Runtime %f ms\n", totalTime / numRuns);
return totalTime < 0;
}
|
6c2eee0c47805c8fe013c342d9f5eb1f0dbf26e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Thread block size
#define BLOCK_SIZE 4
#define MSIZE 4
// Matrices are stored in row-major order:
// M(row, col) = M[row * MSIZE + col]
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(float *, float *, float *);
int checkProduct(float * A, float * B, float * C)
//Check matrix product C = AB
{
int i,j,k; //loop variables
int fail = 0;
float tol = 1e-2;
float ABelement;
//loop over rows
for (i = 0; i < MSIZE; i++)
{
//loop over columns
for (j = 0; j < MSIZE; j++)
{
ABelement = 0.0f;
//loop to compute matrix element
for (k = 0; k < MSIZE; k++)
{
ABelement += A[i*MSIZE + k] * B[k*MSIZE + j];
}
//if matrix element is equal within tolerance
if (fabsf(C[i*MSIZE + j] - ABelement) > tol)
{
printf("Matrix product problem: C != AB\n");
printf("row %d col %d diff=%f\n", i,j,abs(C[i*MSIZE + j] - ABelement));
fail = 1;
}
if (fail == 1) break;
}
if (fail == 1) break;
}
if (fail == 0) printf("Matrix product confirmed!\n");
return fail;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(float* A, float* B, float* C)
{
float *d_A;
size_t size = MSIZE * MSIZE;// * sizeof(float);
//allocate space for matrix A on device
//**WKSP: Allocate space for A in device memory
hipMalloc(&d_A, size*sizeof(float));
//copy matrix A to device
//**WKSP: Copy A matrix from host to device
hipMemcpy(d_A, A, size*sizeof(float), hipMemcpyHostToDevice);
float *d_B;
//allocate space for matrix B on device
//**WKSP: Allocate space for B in device memory
hipMalloc(&d_B, size*sizeof(float));
//copy matrix B to device
//**WKSP: Copy B matrix from host to device
hipMemcpy(d_B, B, size*sizeof(float), hipMemcpyHostToDevice);
// Allocate C in device memory
float *d_C;
//**WKSP: Allocate space for C in device memory
hipMalloc(&d_C, size*sizeof(float));
// Invoke kernel
//**WKSP: Invoke the kernel
//MatMulKernel<<MSIZE*MSIZE/BLOCK_SIZE, MSIZE>>(d_A, d_B, d_C);
//MatMulKernel<<<MSIZE * MSIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_A, d_B, d_C);
hipLaunchKernelGGL(( MatMulKernel), dim3(MSIZE * MSIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_A, d_B, d_C);
// Read C from device memory
//**WKSP: Copy C matrix from device to host
hipMemcpy(C, d_C, size*sizeof(float), hipMemcpyDeviceToHost);
// Free device memory
//**WKSP: Free all allocated device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float* A, float* B, float* C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
//compute the thread index
//**WKSP: Compute the thread index, int idx = ???
int idx = threadIdx.x+blockDim.x*blockIdx.x;
//compute the row and column
int row = idx / MSIZE;
int col = idx - row * MSIZE;
for (int i = 0; i < MSIZE; ++i)
Cvalue += A[row * MSIZE + i]
* B[i * MSIZE + col];
C[idx] = Cvalue;
}
int main(int argc, char** argv)
{
float *matA, *matB, *matC;
int i, j; //row and column indices
uint size = MSIZE * MSIZE * sizeof(float);
// Allocate space for the matrices
matA = (float *) malloc(size);
matB = (float *) malloc(size);
matC = (float *) malloc(size);
// Seed the random number generator
srand( time(NULL) );
// Generate a random value for each element of A and B
for( i = 0; i < MSIZE; i++)
{
for( j = 0; j < MSIZE; j++)
{
matA[i * MSIZE + j] = rand() / (float) RAND_MAX;
matB[i * MSIZE + j] = rand() / (float) RAND_MAX;
}
}
//Multiply the matrices
MatMul(matA, matB, matC);
//Check our work on the host
if (checkProduct(matA, matB, matC) != 0)
printf("Your program may have errors\n");
return 0;
}
| 6c2eee0c47805c8fe013c342d9f5eb1f0dbf26e1.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Thread block size
#define BLOCK_SIZE 4
#define MSIZE 4
// Matrices are stored in row-major order:
// M(row, col) = M[row * MSIZE + col]
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(float *, float *, float *);
int checkProduct(float * A, float * B, float * C)
//Check matrix product C = AB
{
int i,j,k; //loop variables
int fail = 0;
float tol = 1e-2;
float ABelement;
//loop over rows
for (i = 0; i < MSIZE; i++)
{
//loop over columns
for (j = 0; j < MSIZE; j++)
{
ABelement = 0.0f;
//loop to compute matrix element
for (k = 0; k < MSIZE; k++)
{
ABelement += A[i*MSIZE + k] * B[k*MSIZE + j];
}
//if matrix element is equal within tolerance
if (fabsf(C[i*MSIZE + j] - ABelement) > tol)
{
printf("Matrix product problem: C != AB\n");
printf("row %d col %d diff=%f\n", i,j,abs(C[i*MSIZE + j] - ABelement));
fail = 1;
}
if (fail == 1) break;
}
if (fail == 1) break;
}
if (fail == 0) printf("Matrix product confirmed!\n");
return fail;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(float* A, float* B, float* C)
{
float *d_A;
size_t size = MSIZE * MSIZE;// * sizeof(float);
//allocate space for matrix A on device
//**WKSP: Allocate space for A in device memory
cudaMalloc(&d_A, size*sizeof(float));
//copy matrix A to device
//**WKSP: Copy A matrix from host to device
cudaMemcpy(d_A, A, size*sizeof(float), cudaMemcpyHostToDevice);
float *d_B;
//allocate space for matrix B on device
//**WKSP: Allocate space for B in device memory
cudaMalloc(&d_B, size*sizeof(float));
//copy matrix B to device
//**WKSP: Copy B matrix from host to device
cudaMemcpy(d_B, B, size*sizeof(float), cudaMemcpyHostToDevice);
// Allocate C in device memory
float *d_C;
//**WKSP: Allocate space for C in device memory
cudaMalloc(&d_C, size*sizeof(float));
// Invoke kernel
//**WKSP: Invoke the kernel
//MatMulKernel<<MSIZE*MSIZE/BLOCK_SIZE, MSIZE>>(d_A, d_B, d_C);
//MatMulKernel<<<MSIZE * MSIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_A, d_B, d_C);
MatMulKernel<<<MSIZE * MSIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_A, d_B, d_C);
// Read C from device memory
//**WKSP: Copy C matrix from device to host
cudaMemcpy(C, d_C, size*sizeof(float), cudaMemcpyDeviceToHost);
// Free device memory
//**WKSP: Free all allocated device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float* A, float* B, float* C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
//compute the thread index
//**WKSP: Compute the thread index, int idx = ???
int idx = threadIdx.x+blockDim.x*blockIdx.x;
//compute the row and column
int row = idx / MSIZE;
int col = idx - row * MSIZE;
for (int i = 0; i < MSIZE; ++i)
Cvalue += A[row * MSIZE + i]
* B[i * MSIZE + col];
C[idx] = Cvalue;
}
int main(int argc, char** argv)
{
float *matA, *matB, *matC;
int i, j; //row and column indices
uint size = MSIZE * MSIZE * sizeof(float);
// Allocate space for the matrices
matA = (float *) malloc(size);
matB = (float *) malloc(size);
matC = (float *) malloc(size);
// Seed the random number generator
srand( time(NULL) );
// Generate a random value for each element of A and B
for( i = 0; i < MSIZE; i++)
{
for( j = 0; j < MSIZE; j++)
{
matA[i * MSIZE + j] = rand() / (float) RAND_MAX;
matB[i * MSIZE + j] = rand() / (float) RAND_MAX;
}
}
//Multiply the matrices
MatMul(matA, matB, matC);
//Check our work on the host
if (checkProduct(matA, matB, matC) != 0)
printf("Your program may have errors\n");
return 0;
}
|
3fe3a34ad80fcd97b44e74dc75fd01ab8a357958.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call.
// this is a No-op in release builds.
// inline suggests to the compiler to define this function in
// a way that it can be replaceable. This can speed up execution.
// This prevents the compiler from going through the normal function
// overhead when it is called. It isn't looked up. It is compiled so
// that the instructions are just right there. This is used when the
// function has a small number of instructions.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
// Copies the data between the device and the host. It also takes
// performance measurements.
// If the transfers are small, then it would be better in a real
// application to do them batched transfers. You can do this by
// using a temporary array, preferably pinned, and packing all
// of the data that needs to be transferred into it. Transfer
// it when ready.
// this method can be used: (there is also a 3D version)
// hipMemcpy2D(dest, dest_pitch, src, src_pitch, w, h, hipMemcpyHostToDevice)
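// -----------------------------------------------------------------------
// Hedged sketch (not used elsewhere in this file): one way to batch many
// small, regularly strided host-to-device copies into a single call is
// hipMemcpy2D. The helper and its parameter names (dstPitch, srcPitch,
// widthBytes, rows) are illustrative assumptions, not part of the original
// sample.
static inline hipError_t copyStridedRowsToDevice(void* dst, size_t dstPitch,
const void* src, size_t srcPitch,
size_t widthBytes, size_t rows)
{
// Copies 'rows' rows of 'widthBytes' bytes each; the pitches are the byte
// distances between consecutive rows in dst and src respectively.
return hipMemcpy2D(dst, dstPitch, src, srcPitch, widthBytes, rows,
hipMemcpyHostToDevice);
}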
void profileCopies(float* h_a, float* h_b, float* d, unsigned int n, char* desc) {
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
//events for timing
hipEvent_t startEvent, stopEvent;
checkCuda(hipEventCreate(&startEvent));
checkCuda(hipEventCreate(&stopEvent));
// record the time it takes for the host to copy the
// data over to the device.
// Note, it's better to do this kind of analysis with
// nvprof or Nsight rather than instrument the code.
checkCuda(hipEventRecord(startEvent, 0));
checkCuda(hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice));
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
// print result
float time;
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
printf(" Host to Deice bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
// Do the same thing from the device back to the host.
checkCuda(hipEventRecord(startEvent, 0));
checkCuda(hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost));
checkCuda(hipEventRecord(stopEvent, 0));
checkCuda(hipEventSynchronize(stopEvent));
checkCuda(hipEventElapsedTime(&time, startEvent, stopEvent));
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for (int i = 0; i < n; i++) {
if (h_a[i] != h_b[i]) {
printf("*** %s transfers failed *** \n", desc);
break;
}
}
// clean up events
checkCuda(hipEventDestroy(startEvent));
checkCuda(hipEventDestroy(stopEvent));
}
int main()
{
unsigned int nElements = 4 * 1024 * 1024;
const unsigned int bytes = nElements * sizeof(float);
//host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable
h_bPageable = (float*)malloc(bytes); // host pageable
checkCuda(hipHostMalloc((void**)&h_aPinned, bytes)); // host pinned
checkCuda(hipHostMalloc((void**)&h_bPinned, bytes)); // host pinned
checkCuda(hipMalloc((void**)&d_a, bytes)); // device
for (int i = 0; i < nElements; ++i) {
h_aPageable[i] = i;
}
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
hipDeviceProp_t prop;
checkCuda(hipGetDeviceProperties(&prop, 0));
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
hipFree(d_a);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
free(h_bPageable);
// On my machine the pinned memory is over 3 times faster.
// This is all device dependent, however.
// Do not overuse Pinned Memory though. It limits the memory
// available to the operating system, etc. So, test to make
// sure the application is working suitably.
// Ultimately, take care to minimize the number of transfers
// and to optimize them when they must happen. This is the
// bottleneck of hybrid CPU/GPU computing.
return 0;
} | 3fe3a34ad80fcd97b44e74dc75fd01ab8a357958.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call.
// this is a No-op in release builds.
// inline suggests to the compiler to define this function in
// a way that it can be replaceable. This can speed up execution.
// This prevents the compiler from going through the normal function
// overhead when it is called. It isn't looked up. It is compiled so
// that the instructions are just right there. This is used when the
// function has a small number of instructions.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
// Copies the data between the device and the host. It also takes
// performance measurements.
// If the transfers are small, then it would be better in a real
// application to do them batched transfers. You can do this by
// using a temporary array, preferably pinned, and packing all
// of the data that needs to be transferred into it. Transfer
// it when ready.
// this method can be used: (there is also a 3D version)
// cudaMemcpy2D(dest, dest_pitch, src, src_pitch, w, h, cudaMemcpyHostToDevice)
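// -----------------------------------------------------------------------
// Hedged sketch (not used elsewhere in this file): one way to batch many
// small, regularly strided host-to-device copies into a single call is
// cudaMemcpy2D. The helper and its parameter names (dstPitch, srcPitch,
// widthBytes, rows) are illustrative assumptions, not part of the original
// sample.
static inline cudaError_t copyStridedRowsToDevice(void* dst, size_t dstPitch,
const void* src, size_t srcPitch,
size_t widthBytes, size_t rows)
{
// Copies 'rows' rows of 'widthBytes' bytes each; the pitches are the byte
// distances between consecutive rows in dst and src respectively.
return cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthBytes, rows,
cudaMemcpyHostToDevice);
}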
void profileCopies(float* h_a, float* h_b, float* d, unsigned int n, char* desc) {
printf("\n%s transfers\n", desc);
unsigned int bytes = n * sizeof(float);
//events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent));
checkCuda(cudaEventCreate(&stopEvent));
// record the time it takes for the host to copy the
// data over to the device.
// Note, it's better to do this kind of analysis with
// nvprof or Nsight rather than instrument the code.
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
// print result
float time;
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
printf(" Host to Deice bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
// Do the same thing from the device back to the host.
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
checkCuda(cudaEventElapsedTime(&time, startEvent, stopEvent));
printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
for (int i = 0; i < n; i++) {
if (h_a[i] != h_b[i]) {
printf("*** %s transfers failed *** \n", desc);
break;
}
}
// clean up events
checkCuda(cudaEventDestroy(startEvent));
checkCuda(cudaEventDestroy(stopEvent));
}
int main()
{
unsigned int nElements = 4 * 1024 * 1024;
const unsigned int bytes = nElements * sizeof(float);
//host arrays
float *h_aPageable, *h_bPageable;
float *h_aPinned, *h_bPinned;
// device array
float *d_a;
// allocate and initialize
h_aPageable = (float*)malloc(bytes); // host pageable
h_bPageable = (float*)malloc(bytes); // host pageable
checkCuda(cudaMallocHost((void**)&h_aPinned, bytes)); // host pinned
checkCuda(cudaMallocHost((void**)&h_bPinned, bytes)); // host pinned
checkCuda(cudaMalloc((void**)&d_a, bytes)); // device
for (int i = 0; i < nElements; ++i) {
h_aPageable[i] = i;
}
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
cudaDeviceProp prop;
checkCuda(cudaGetDeviceProperties(&prop, 0));
printf("\nDevice: %s\n", prop.name);
printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
// perform copies and report bandwidth
profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
printf("\n");
// cleanup
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
// On my machine the pinned memory is over 3 times faster.
// This is all device dependent, however.
// Do not overuse Pinned Memory though. It limits the memory
// available to the operating system, etc. So, test to make
// sure the application is working suitably.
// Ultimately, take care to minimize the number of transfers
// and to optimize them when they must happen. This is the
// bottleneck of hybrid CPU/GPU computing.
return 0;
} |
d005aeb60f968998687bc36adba24049be618c88.hip | // !!! This is a file automatically generated by hipify!!!
#include "stable_merge_sort.h"
#include "time_invocation_cuda.hpp"
#include <moderngpu.cuh>
#include <util/mgpucontext.h>
#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <algorithm>
#include <vector>
#include <cassert>
typedef int T;
struct hash_functor
{
__host__ __device__
unsigned int operator()(unsigned int x)
{
x = (x+0x7ed55d16) + (x<<12);
x = (x^0xc761c23c) ^ (x>>19);
x = (x+0x165667b1) + (x<<5);
x = (x+0xd3a2646c) ^ (x<<9);
x = (x+0xfd7046c5) + (x<<3);
x = (x^0xb55a4f09) ^ (x>>16);
return x;
}
};
template<typename Vector>
void generate_random_data(Vector &vec)
{
thrust::tabulate(vec.begin(), vec.end(), hash_functor());
}
void do_it(cached_allocator &alloc, size_t n)
{
std::vector<T> h_data(n);
generate_random_data(h_data);
thrust::device_vector<T> d_data = h_data;
std::stable_sort(h_data.begin(), h_data.end());
::stable_merge_sort(thrust::hip::par(alloc), d_data.begin(), d_data.end(), thrust::less<T>());
hipError_t error = hipGetLastError();
if(error)
{
std::cerr << "CUDA error: " << hipGetErrorString(error) << std::endl;
}
assert(h_data == d_data);
}
void my_sort(cached_allocator *alloc, thrust::device_vector<T> *data)
{
generate_random_data(*data);
stable_merge_sort(thrust::hip::par(*alloc), data->begin(), data->end(), thrust::less<T>());
}
void sean_sort(mgpu::ContextPtr *ctx, thrust::device_vector<T> *data)
{
generate_random_data(*data);
mgpu::MergesortKeys(thrust::raw_pointer_cast(data->data()), data->size(), thrust::less<T>(), **ctx);
}
int main()
{
mgpu::ContextPtr ctx = mgpu::CreateCudaDevice(0);
cached_allocator alloc;
for(size_t n = 1; n <= 1 << 20; n <<= 1)
{
std::cout << "Testing n = " << n << std::endl;
do_it(alloc, n);
}
for(int i = 0; i < 20; ++i)
{
size_t n = hash_functor()(i) % (1 << 20);
std::cout << "Testing n = " << n << std::endl;
do_it(alloc, n);
}
thrust::device_vector<T> vec(1 << 24);
sean_sort(&ctx, &vec);
double sean_msecs = time_invocation_cuda(20, sean_sort, &ctx, &vec);
my_sort(&alloc, &vec);
double my_msecs = time_invocation_cuda(20, my_sort, &alloc, &vec);
std::cout << "Sean's time: " << sean_msecs << " ms" << std::endl;
std::cout << "My time: " << my_msecs << " ms" << std::endl;
std::cout << "My relative performance: " << sean_msecs / my_msecs << std::endl;
return 0;
}
| d005aeb60f968998687bc36adba24049be618c88.cu | #include "stable_merge_sort.h"
#include "time_invocation_cuda.hpp"
#include <moderngpu.cuh>
#include <util/mgpucontext.h>
#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <algorithm>
#include <vector>
#include <cassert>
typedef int T;
struct hash_functor
{
__host__ __device__
unsigned int operator()(unsigned int x)
{
x = (x+0x7ed55d16) + (x<<12);
x = (x^0xc761c23c) ^ (x>>19);
x = (x+0x165667b1) + (x<<5);
x = (x+0xd3a2646c) ^ (x<<9);
x = (x+0xfd7046c5) + (x<<3);
x = (x^0xb55a4f09) ^ (x>>16);
return x;
}
};
template<typename Vector>
void generate_random_data(Vector &vec)
{
thrust::tabulate(vec.begin(), vec.end(), hash_functor());
}
void do_it(cached_allocator &alloc, size_t n)
{
std::vector<T> h_data(n);
generate_random_data(h_data);
thrust::device_vector<T> d_data = h_data;
std::stable_sort(h_data.begin(), h_data.end());
::stable_merge_sort(thrust::cuda::par(alloc), d_data.begin(), d_data.end(), thrust::less<T>());
cudaError_t error = cudaGetLastError();
if(error)
{
std::cerr << "CUDA error: " << cudaGetErrorString(error) << std::endl;
}
assert(h_data == d_data);
}
void my_sort(cached_allocator *alloc, thrust::device_vector<T> *data)
{
generate_random_data(*data);
stable_merge_sort(thrust::cuda::par(*alloc), data->begin(), data->end(), thrust::less<T>());
}
void sean_sort(mgpu::ContextPtr *ctx, thrust::device_vector<T> *data)
{
generate_random_data(*data);
mgpu::MergesortKeys(thrust::raw_pointer_cast(data->data()), data->size(), thrust::less<T>(), **ctx);
}
int main()
{
mgpu::ContextPtr ctx = mgpu::CreateCudaDevice(0);
cached_allocator alloc;
for(size_t n = 1; n <= 1 << 20; n <<= 1)
{
std::cout << "Testing n = " << n << std::endl;
do_it(alloc, n);
}
for(int i = 0; i < 20; ++i)
{
size_t n = hash_functor()(i) % (1 << 20);
std::cout << "Testing n = " << n << std::endl;
do_it(alloc, n);
}
thrust::device_vector<T> vec(1 << 24);
sean_sort(&ctx, &vec);
double sean_msecs = time_invocation_cuda(20, sean_sort, &ctx, &vec);
my_sort(&alloc, &vec);
double my_msecs = time_invocation_cuda(20, my_sort, &alloc, &vec);
std::cout << "Sean's time: " << sean_msecs << " ms" << std::endl;
std::cout << "My time: " << my_msecs << " ms" << std::endl;
std::cout << "My relative performance: " << sean_msecs / my_msecs << std::endl;
return 0;
}
|
4b22455f79b813f1c4f167d1dd5e813574de67b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <iostream>
#include "trace.cuh"
#include "sphere_hip.cuh"
#include "triangle.cuh"
#include "light.cuh"
#include "kernel_hip.cuh"
// checks whole scene for intersect
__device__ int find_intersect(float *pos, float *dir, float *new_pos, float *new_dir, float *normal, color_t *colors, scene_t *scene)
{
float dist_tr = FLT_MAX;
float dist_sp = FLT_MAX;
int closest_tr = -1;
int closest_sp = -1;
int sp_tr_none = -1;
int res = 0;
float tr_uv[2];
float temp_tr_uv[2];
for (int i = 0; i < scene->spheres_count; i++)
{
float temp = sphere_intersect(pos, dir, SPHERE_POS(SPHERE_INDEX(i, scene->spheres)), *SPHERE_RADIUS(SPHERE_INDEX(i, scene->spheres)));
if (temp < dist_sp)
{
dist_sp = temp * 0.99999f;
closest_sp = i;
}
}
for (int i = 0; i < scene->triangles_count; i++)
{
float temp = triangle_intersect(pos, dir, TRIANGLE_POS(TRIANGLE_INDEX(i, scene->triangles)), temp_tr_uv);
if (temp < dist_tr)
{
dist_tr = temp * 0.99999f;
closest_tr = i;
set_vec3(tr_uv, temp_tr_uv);
}
}
if (closest_sp != -1 && closest_tr != -1)
{
sp_tr_none = dist_sp <= dist_tr ? 0 : 1;
}
else
{
if (closest_sp != -1)
{
sp_tr_none = 0;
}
if (closest_tr != -1)
{
sp_tr_none = 1;
}
}
float *elem;
switch (sp_tr_none)
{
case 0:
sphere_intersect_pos(new_pos, pos, dir, dist_sp);
elem = SPHERE_INDEX(closest_sp, scene->spheres);
sphere_normal(normal, new_pos, SPHERE_POS(elem));
set_vec3(colors->ambient, SPHERE_AMBIENT(elem));
set_vec3(colors->diffuse, SPHERE_DIFFUSE(elem));
set_vec3(colors->specular, SPHERE_SPECULAR(elem));
res = 1;
break;
case 1:
elem = TRIANGLE_INDEX(closest_tr, scene->triangles);
mul(new_pos, dir, dist_tr);
add(new_pos, pos);
//triangle_pos(new_pos, tr_uv, TRIANGLE_POS(elem));
triangle_normal(normal, TRIANGLE_POS(elem));
set_vec3(colors->ambient, TRIANGLE_AMBIENT(elem));
set_vec3(colors->diffuse, TRIANGLE_DIFFUSE(elem));
set_vec3(colors->specular, TRIANGLE_SPECULAR(elem));
res = 1;
break;
}
if (res)
{
reflection(new_dir, dir, normal);
}
return res;
}
// trace single ray
__device__ void trace_ray(
float *color,
float *pos,
float *dir,
uint32_t depth,
uint32_t depth_max,
scene_t *scene,
fast_random_t *rand)
{
float new_pos[3] = {0.0f, 0.0f, 0.0f};
float new_dir[3] = {0.0f, 0.0f, 0.0f};
float normal[3] = {0.0f, 0.0f, 0.0f};
float none[3] = { 0.12f, 0.1f, 0.11f};
color_t colors;
scene->light = const_mem;
scene->spheres = &scene->light[LIGHT_SIZE * scene->light_count];
scene->triangles = &scene->spheres[SPHERE_SIZE * scene->spheres_count];
set_vec3(color, none);
if (find_intersect(pos, dir, new_pos, new_dir, normal, &colors, scene))
{
normalize(normal);
float light_color[3] = {0.0f, 0.0f, 1.0f};
calc_light(new_pos, normal, light_color, scene, &colors);
if(depth < depth_max)
{
const int c = 4;
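// Shoot c jittered secondary rays and average their contributions;
// the 0.25f weight applied to each sample below is 1.0f / c.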
float res_color[3];
float rv[3];
for(int i = 0; i < c; i++)
{
set_vec3(pos, new_pos); // prep for reflection
set_vec3(dir, new_dir);
init_vec3(rv, rand_f(*rand), rand_f(*rand), rand_f(*rand));
add(dir, rv);
trace_ray(res_color,
pos,
dir,
depth + 1,
depth_max,
scene,
rand);
mul(res_color, 0.25f);
add(light_color, res_color);
}
}
set_vec3(color, light_color);
}
else
{
set_vec3(color, none);
}
}
| 4b22455f79b813f1c4f167d1dd5e813574de67b4.cu | #include <stdint.h>
#include <iostream>
#include "trace.cuh"
#include "sphere.cuh"
#include "triangle.cuh"
#include "light.cuh"
#include "kernel.cuh"
// checks whole scene for intersect
__device__ int find_intersect(float *pos, float *dir, float *new_pos, float *new_dir, float *normal, color_t *colors, scene_t *scene)
{
float dist_tr = FLT_MAX;
float dist_sp = FLT_MAX;
int closest_tr = -1;
int closest_sp = -1;
int sp_tr_none = -1;
int res = 0;
float tr_uv[2];
float temp_tr_uv[2];
for (int i = 0; i < scene->spheres_count; i++)
{
float temp = sphere_intersect(pos, dir, SPHERE_POS(SPHERE_INDEX(i, scene->spheres)), *SPHERE_RADIUS(SPHERE_INDEX(i, scene->spheres)));
if (temp < dist_sp)
{
dist_sp = temp * 0.99999f;
closest_sp = i;
}
}
for (int i = 0; i < scene->triangles_count; i++)
{
float temp = triangle_intersect(pos, dir, TRIANGLE_POS(TRIANGLE_INDEX(i, scene->triangles)), temp_tr_uv);
if (temp < dist_tr)
{
dist_tr = temp * 0.99999f;
closest_tr = i;
set_vec3(tr_uv, temp_tr_uv);
}
}
if (closest_sp != -1 && closest_tr != -1)
{
sp_tr_none = dist_sp <= dist_tr ? 0 : 1;
}
else
{
if (closest_sp != -1)
{
sp_tr_none = 0;
}
if (closest_tr != -1)
{
sp_tr_none = 1;
}
}
float *elem;
switch (sp_tr_none)
{
case 0:
sphere_intersect_pos(new_pos, pos, dir, dist_sp);
elem = SPHERE_INDEX(closest_sp, scene->spheres);
sphere_normal(normal, new_pos, SPHERE_POS(elem));
set_vec3(colors->ambient, SPHERE_AMBIENT(elem));
set_vec3(colors->diffuse, SPHERE_DIFFUSE(elem));
set_vec3(colors->specular, SPHERE_SPECULAR(elem));
res = 1;
break;
case 1:
elem = TRIANGLE_INDEX(closest_tr, scene->triangles);
mul(new_pos, dir, dist_tr);
add(new_pos, pos);
//triangle_pos(new_pos, tr_uv, TRIANGLE_POS(elem));
triangle_normal(normal, TRIANGLE_POS(elem));
set_vec3(colors->ambient, TRIANGLE_AMBIENT(elem));
set_vec3(colors->diffuse, TRIANGLE_DIFFUSE(elem));
set_vec3(colors->specular, TRIANGLE_SPECULAR(elem));
res = 1;
break;
}
if (res)
{
reflection(new_dir, dir, normal);
}
return res;
}
// trace single ray
__device__ void trace_ray(
float *color,
float *pos,
float *dir,
uint32_t depth,
uint32_t depth_max,
scene_t *scene,
fast_random_t *rand)
{
float new_pos[3] = {0.0f, 0.0f, 0.0f};
float new_dir[3] = {0.0f, 0.0f, 0.0f};
float normal[3] = {0.0f, 0.0f, 0.0f};
float none[3] = { 0.12f, 0.1f, 0.11f};
color_t colors;
scene->light = const_mem;
scene->spheres = &scene->light[LIGHT_SIZE * scene->light_count];
scene->triangles = &scene->spheres[SPHERE_SIZE * scene->spheres_count];
set_vec3(color, none);
if (find_intersect(pos, dir, new_pos, new_dir, normal, &colors, scene))
{
normalize(normal);
float light_color[3] = {0.0f, 0.0f, 1.0f};
calc_light(new_pos, normal, light_color, scene, &colors);
if(depth < depth_max)
{
const int c = 4;
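// Shoot c jittered secondary rays and average their contributions;
// the 0.25f weight applied to each sample below is 1.0f / c.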
float res_color[3];
float rv[3];
for(int i = 0; i < c; i++)
{
set_vec3(pos, new_pos); // prep for reflection
set_vec3(dir, new_dir);
init_vec3(rv, rand_f(*rand), rand_f(*rand), rand_f(*rand));
add(dir, rv);
trace_ray(res_color,
pos,
dir,
depth + 1,
depth_max,
scene,
rand);
mul(res_color, 0.25f);
add(light_color, res_color);
}
}
set_vec3(color, light_color);
}
else
{
set_vec3(color, none);
}
}
|
3b645574ea5c21106cf5381b50ef01c3c0a3e296.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <sys/time.h>
#define BLUR_SIZE 9
#define USE_2D 0
//define the storage for the blur kernel in GPU Constant Memory
__constant__ float M_d[BLUR_SIZE];
cv::Mat imageRGBA;
cv::Mat imageGrey;
cv::Mat imageLin;
cv::Mat image;
uchar4 *d_rgbaImage__;
uchar4 *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
const long numPixels = numRows() * numCols();
//
// dtime -
//
// utility routine to return
// the current wall clock time
//
double dtime()
{
double tseconds = 0.0;
struct timeval mytime;
gettimeofday(&mytime,(struct timezone*)0);
tseconds = (double)(mytime.tv_sec + mytime.tv_usec*1.0e-6);
return( tseconds );
}
//returns a pointer to an RGBA version of the input image
//and a pointer to the single channel grey-scale output
//on both the host and device
void preProcess(uchar4 **inputImage, uchar4 **greyImage, uchar4 **linImage,
uchar4 **d_rgbaImage, uchar4 **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
hipFree(0);
//Read Image into an OpenCV Matrix
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageRGBA.copyTo(imageGrey);
imageRGBA.copyTo(imageLin);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = (uchar4 *)imageGrey.ptr<unsigned char>(0);
*linImage = (uchar4 *)imageLin.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
hipMalloc(d_rgbaImage, numPixels * sizeof(uchar4));
hipMalloc(d_greyImage, numPixels * sizeof(uchar4));
hipMemset(*d_greyImage, 0, numPixels * sizeof(uchar4)); //make sure no memory is left laying around
//copy input array to the GPU
hipMemcpy(*d_rgbaImage, *inputImage, numPixels * sizeof(uchar4), hipMemcpyHostToDevice);
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file) {
//TODO copy the output back to the host
const int num_pixels = numRows() * numCols();
hipMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, num_pixels * sizeof(uchar4), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//change in color space required by OpenCV
cv::cvtColor(imageGrey, imageGrey, CV_BGR2RGBA);
//output the image to a file
cv::imwrite(output_file.c_str(), imageGrey);
//display the output image (will only work if you are on the lab machines)
cv::imshow ("Output Image", imageGrey);
cv::waitKey(0);
////cleanup
hipFree(d_rgbaImage__);
hipFree(d_greyImage__);
}
__host__ __device__ unsigned char check(int n) {return n > 255 ? 255 : (n < 0 ? 0:n);}
__host__ __device__ int indexBounds(int ndx, int maxNdx) {
return ndx > (maxNdx - 1) ? (maxNdx - 1) : (ndx < 0 ? 0 : ndx);
}
__host__ __device__ int linearize(int c, int r, int w, int h) {
return indexBounds(c, w) + indexBounds(r, h)*w;
}
__global__
void conv1DShared(uchar4* const rgbaImage,uchar4* const greyImage,int numRows, int numCols)
{
int pix_x = (blockIdx.x * blockDim.x) + threadIdx.x;
int pix_y = (blockIdx.y * blockDim.y) + threadIdx.y;
int local_x = threadIdx.x + 1;
int local_y = threadIdx.y + 1;
__shared__ uchar4 cache[324];
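// cache holds an 18x18 tile: the 16x16 thread block plus a one-pixel halo
// on every side ((blockDim.x + 2) * (blockDim.y + 2) = 324). local_x/local_y
// are the thread's coordinates shifted by +1 into the interior of that tile.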
if (pix_x >= 0 && pix_x < numCols && pix_y >= 0 && pix_y < numRows) {
int oneD = linearize(pix_x, pix_y, numCols, numRows);
cache[local_y * (blockDim.y + 2) + local_x] = rgbaImage[oneD];
if (threadIdx.x == 0 && threadIdx.y == 0) {
cache[0] = rgbaImage[linearize(pix_x - 1, pix_y - 1, numCols, numRows)];
cache[(blockDim.x + 1)] = rgbaImage[linearize(pix_x + blockDim.x, pix_y - 1, numCols, numRows)];
cache[(blockDim.y + 1) * (blockDim.x + 2)] = rgbaImage[linearize(pix_x - 1, pix_y + blockDim.y, numCols, numRows)];
cache[(blockDim.y + 1) * (blockDim.x + 2) + (blockDim.x + 1)] = rgbaImage[linearize(pix_x + blockDim.x, pix_y + blockDim.y, numCols, numRows)]; // bottom-right halo corner
}
if (threadIdx.x == 0) {
cache[(blockDim.x + 2) * local_y] = rgbaImage[linearize(pix_x - 1, pix_y, numCols,numRows)];
cache[(blockDim.x + 2) * local_y + (blockDim.x + 1)] = rgbaImage[linearize(pix_x + blockDim.x, pix_y, numCols,numRows)]; // right halo column
}
if (threadIdx.y == 0) {
cache[local_x] = rgbaImage[linearize(pix_x, pix_y - 1, numCols,numRows)]; // top halo row
cache[(blockDim.y + 1) * (blockDim.x + 2) + local_x ] = rgbaImage[linearize(pix_x, pix_y + blockDim.y, numCols,numRows)];
}
__syncthreads();
float blurValx = 0;
float blurValy = 0;
float blurValz = 0;
for (int i = -1; i <= 1; ++i) {
for (int j = -1; j <= 1; ++j) {
int imgNdx = (local_y + j) * (blockDim.y + 2) + (local_x + i);
int filterNdx = linearize(1 + j, 1 + i, 3, 3);
int weight = M_d[filterNdx];
blurValx += cache[imgNdx].x * weight;
blurValy += cache[imgNdx].y * weight;
blurValz += cache[imgNdx].z * weight;
}
}
greyImage[pix_y * numCols + pix_x].x = check((int)blurValx);
greyImage[pix_y * numCols + pix_x].y = check((int)blurValy);
greyImage[pix_y * numCols + pix_x].z = check((int)blurValz);
}
}
// Takes an input image and places the sharpened version in outImage
void linearSharpen(const uchar4 *inImage, uchar4 *outImage,
size_t numRows, size_t numCols, float *linFilter) {
for (int pix_y = 0; pix_y < numRows; pix_y++) {
for (int pix_x = 0; pix_x < numCols; pix_x++) {
float blurValx = 0;
float blurValy = 0;
float blurValz = 0;
for (int i = -1; i <= 1; ++i) {
for (int j = -1; j <= 1; ++j) {
int imgNdx = linearize(pix_x + j, pix_y + i, numCols, numRows);
int filterNdx = linearize(1 +j, 1+ i, 3, 3);
int weight = linFilter[filterNdx];
blurValx += inImage[imgNdx].x * weight;
blurValy += inImage[imgNdx].y * weight;
blurValz += inImage[imgNdx].z * weight;
}
}
outImage[pix_y * numCols + pix_x].x = check((int)blurValx);
outImage[pix_y * numCols + pix_x].y = check((int)blurValy);
outImage[pix_y * numCols + pix_x].z = check((int)blurValz);
}
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage,
uchar4 * d_rgbaImage,
uchar4* d_greyImage,
size_t numRows,
size_t numCols)
{
float M_h[BLUR_SIZE]={-1.0, -1.0, -1.0, -1.0, 9.0, -1.0, -1.0, -1.0, -1.0}; //change this to whatever 1D filter you are using
hipMemcpyToSymbol(M_d,M_h, BLUR_SIZE*sizeof(float)); //allocates/copy to Constant Memory on the GPU
//temp image
uchar4 *d_greyImageTemp;
hipMalloc((void **)&d_greyImageTemp, sizeof(uchar4) * numRows*numCols);
hipMemset(d_greyImageTemp, 0, numRows*numCols * sizeof(uchar4)); //make sure no memory is left laying around
int threadSize=16;
int gridSizeX=(numCols + threadSize - 1)/threadSize;
int gridSizeY=(numRows + threadSize - 1)/threadSize;
const dim3 blockSize(threadSize, threadSize, 1);
const dim3 gridSize(gridSizeX, gridSizeY, 1);
//for (int i=0;i<30;i++){
//row
hipLaunchKernelGGL(( conv1DShared), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage,d_greyImage,numRows,numCols);
hipDeviceSynchronize();
//}
}
int main(int argc, char **argv) {
hipDeviceReset();
uchar4 *h_rgbaImage, *d_rgbaImage;
uchar4 *h_greyImage, *d_greyImage;
uchar4 *h_linImage;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &h_linImage, &d_rgbaImage, &d_greyImage, input_file);
//call the students' code
your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
hipDeviceSynchronize();
hipGetLastError();
printf("\n");
// Now time linear version
double startTime = dtime();
//Filter to be applied
float linearFilter[] = {-1.0, -1.0, -1.0, -1.0, 9.0, -1.0, -1.0, -1.0, -1.0};
linearSharpen(h_rgbaImage, h_linImage, numRows(), numCols(), linearFilter);
printf("Linear runtime: %lf seconds\n", dtime() - startTime);
postProcess(output_file); //prints gray image
hipDeviceReset();
return 0;
}
| 3b645574ea5c21106cf5381b50ef01c3c0a3e296.cu | #include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <sys/time.h>
#define BLUR_SIZE 9
#define USE_2D 0
//define the storage for the blur kernel in GPU Constant Memory
__constant__ float M_d[BLUR_SIZE];
cv::Mat imageRGBA;
cv::Mat imageGrey;
cv::Mat imageLin;
cv::Mat image;
uchar4 *d_rgbaImage__;
uchar4 *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
const long numPixels = numRows() * numCols();
//
// dtime -
//
// utility routine to return
// the current wall clock time
//
double dtime()
{
double tseconds = 0.0;
struct timeval mytime;
gettimeofday(&mytime,(struct timezone*)0);
tseconds = (double)(mytime.tv_sec + mytime.tv_usec*1.0e-6);
return( tseconds );
}
//returns a pointer to an RGBA version of the input image
//and a pointer to the single channel grey-scale output
//on both the host and device
void preProcess(uchar4 **inputImage, uchar4 **greyImage, uchar4 **linImage,
uchar4 **d_rgbaImage, uchar4 **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
cudaFree(0);
//Read Image into an OpenCV Matrix
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageRGBA.copyTo(imageGrey);
imageRGBA.copyTo(imageLin);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = (uchar4 *)imageGrey.ptr<unsigned char>(0);
*linImage = (uchar4 *)imageLin.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
cudaMalloc(d_rgbaImage, numPixels * sizeof(uchar4));
cudaMalloc(d_greyImage, numPixels * sizeof(uchar4));
cudaMemset(*d_greyImage, 0, numPixels * sizeof(uchar4)); //make sure no memory is left laying around
//copy input array to the GPU
cudaMemcpy(*d_rgbaImage, *inputImage, numPixels * sizeof(uchar4), cudaMemcpyHostToDevice);
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file) {
//TODO copy the output back to the host
const int num_pixels = numRows() * numCols();
cudaMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, num_pixels * sizeof(uchar4), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//change in color space required by OpenCV
cv::cvtColor(imageGrey, imageGrey, CV_BGR2RGBA);
//output the image to a file
cv::imwrite(output_file.c_str(), imageGrey);
//display the output image (will only work if you are on the lab machines)
cv::imshow ("Output Image", imageGrey);
cv::waitKey(0);
////cleanup
cudaFree(d_rgbaImage__);
cudaFree(d_greyImage__);
}
__host__ __device__ unsigned char check(int n) {return n > 255 ? 255 : (n < 0 ? 0:n);}
__host__ __device__ int indexBounds(int ndx, int maxNdx) {
return ndx > (maxNdx - 1) ? (maxNdx - 1) : (ndx < 0 ? 0 : ndx);
}
__host__ __device__ int linearize(int c, int r, int w, int h) {
return indexBounds(c, w) + indexBounds(r, h)*w;
}
__global__
void conv1DShared(uchar4* const rgbaImage,uchar4* const greyImage,int numRows, int numCols)
{
int pix_x = (blockIdx.x * blockDim.x) + threadIdx.x;
int pix_y = (blockIdx.y * blockDim.y) + threadIdx.y;
int local_x = threadIdx.x + 1;
int local_y = threadIdx.y + 1;
__shared__ uchar4 cache[324];
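// cache holds an 18x18 tile: the 16x16 thread block plus a one-pixel halo
// on every side ((blockDim.x + 2) * (blockDim.y + 2) = 324). local_x/local_y
// are the thread's coordinates shifted by +1 into the interior of that tile.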
if (pix_x >= 0 && pix_x < numCols && pix_y >= 0 && pix_y < numRows) {
int oneD = linearize(pix_x, pix_y, numCols, numRows);
cache[local_y * (blockDim.y + 2) + local_x] = rgbaImage[oneD];
if (threadIdx.x == 0 && threadIdx.y == 0) {
cache[0] = rgbaImage[linearize(pix_x - 1, pix_y - 1, numCols, numRows)];
cache[(blockDim.x + 1)] = rgbaImage[linearize(pix_x + blockDim.x, pix_y - 1, numCols, numRows)];
cache[(blockDim.y + 1) * (blockDim.x + 2)] = rgbaImage[linearize(pix_x - 1, pix_y + blockDim.y, numCols, numRows)];
cache[(blockDim.y + 1) * (blockDim.x + 2) + (blockDim.x + 1)] = rgbaImage[linearize(pix_x + blockDim.x, pix_y + blockDim.y, numCols, numRows)]; // bottom-right halo corner
}
if (threadIdx.x == 0) {
cache[(blockDim.x + 2) * local_y] = rgbaImage[linearize(pix_x - 1, pix_y, numCols,numRows)];
cache[(blockDim.x + 2) * local_y + (blockDim.x + 1)] = rgbaImage[linearize(pix_x + blockDim.x, pix_y, numCols,numRows)]; // right halo column
}
if (threadIdx.y == 0) {
cache[local_x] = rgbaImage[linearize(pix_x, pix_y - 1, numCols,numRows)]; // top halo row
cache[(blockDim.y + 1) * (blockDim.x + 2) + local_x ] = rgbaImage[linearize(pix_x, pix_y + blockDim.y, numCols,numRows)];
}
__syncthreads();
float blurValx = 0;
float blurValy = 0;
float blurValz = 0;
for (int i = -1; i <= 1; ++i) {
for (int j = -1; j <= 1; ++j) {
int imgNdx = (local_y + j) * (blockDim.y + 2) + (local_x + i);
int filterNdx = linearize(1 + j, 1 + i, 3, 3);
int weight = M_d[filterNdx];
blurValx += cache[imgNdx].x * weight;
blurValy += cache[imgNdx].y * weight;
blurValz += cache[imgNdx].z * weight;
}
}
greyImage[pix_y * numCols + pix_x].x = check((int)blurValx);
greyImage[pix_y * numCols + pix_x].y = check((int)blurValy);
greyImage[pix_y * numCols + pix_x].z = check((int)blurValz);
}
}
// Takes an input image and places the sharpened version in outImage
void linearSharpen(const uchar4 *inImage, uchar4 *outImage,
size_t numRows, size_t numCols, float *linFilter) {
for (int pix_y = 0; pix_y < numRows; pix_y++) {
for (int pix_x = 0; pix_x < numCols; pix_x++) {
float blurValx = 0;
float blurValy = 0;
float blurValz = 0;
for (int i = -1; i <= 1; ++i) {
for (int j = -1; j <= 1; ++j) {
int imgNdx = linearize(pix_x + j, pix_y + i, numCols, numRows);
int filterNdx = linearize(1 +j, 1+ i, 3, 3);
int weight = linFilter[filterNdx];
blurValx += inImage[imgNdx].x * weight;
blurValy += inImage[imgNdx].y * weight;
blurValz += inImage[imgNdx].z * weight;
}
}
outImage[pix_y * numCols + pix_x].x = check((int)blurValx);
outImage[pix_y * numCols + pix_x].y = check((int)blurValy);
outImage[pix_y * numCols + pix_x].z = check((int)blurValz);
}
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage,
uchar4 * d_rgbaImage,
uchar4* d_greyImage,
size_t numRows,
size_t numCols)
{
float M_h[BLUR_SIZE]={-1.0, -1.0, -1.0, -1.0, 9.0, -1.0, -1.0, -1.0, -1.0}; //change this to whatever 1D filter you are using
cudaMemcpyToSymbol(M_d,M_h, BLUR_SIZE*sizeof(float)); //allocates/copy to Constant Memory on the GPU
//temp image
uchar4 *d_greyImageTemp;
cudaMalloc((void **)&d_greyImageTemp, sizeof(uchar4) * numRows*numCols);
cudaMemset(d_greyImageTemp, 0, numRows*numCols * sizeof(uchar4)); //make sure no memory is left laying around
int threadSize=16;
int gridSizeX=(numCols + threadSize - 1)/threadSize;
int gridSizeY=(numRows + threadSize - 1)/threadSize;
const dim3 blockSize(threadSize, threadSize, 1);
const dim3 gridSize(gridSizeX, gridSizeY, 1);
//for (int i=0;i<30;i++){
//row
conv1DShared<<<gridSize, blockSize>>>(d_rgbaImage,d_greyImage,numRows,numCols);
cudaDeviceSynchronize();
//}
}
int main(int argc, char **argv) {
cudaDeviceReset();
uchar4 *h_rgbaImage, *d_rgbaImage;
uchar4 *h_greyImage, *d_greyImage;
uchar4 *h_linImage;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &h_linImage, &d_rgbaImage, &d_greyImage, input_file);
//call the students' code
your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
cudaDeviceSynchronize();
cudaGetLastError();
printf("\n");
// Now time linear version
double startTime = dtime();
//Filter to be applied
float linearFilter[] = {-1.0, -1.0, -1.0, -1.0, 9.0, -1.0, -1.0, -1.0, -1.0};
linearSharpen(h_rgbaImage, h_linImage, numRows(), numCols(), linearFilter);
printf("Linear runtime: %lf seconds\n", dtime() - startTime);
postProcess(output_file); //prints gray image
cudaThreadExit();
return 0;
}
|
ae80fcdbd66af15f4f62e5601bb48a2dfe68e293.hip | // !!! This is a file automatically generated by hipify!!!
#include "mpi.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include<cuda.h>
#include <future>
#include <cassert>
#define cudaCall(val) __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ )
/*
A test program that tries to call mpi_isend from within a cuda event callback.
Each rank starts S streams and sends them asynchronously to its right neighbor
in a ring. Each rank first initializes the send buffers, then issues an mpi_irecv for a device buffer,
then calls a memcpy h-to-d, followed by a kernel in a stream; the stream also enqueues a
callback that when executed starts the mpi_isend from a device buffer.
It then waits for everybody and copies the buffers back onto the host to print.
Author: Christoph Angerer
*/
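// Ring topology used below: with numprocs = 4, for example, rank 2 posts S
// receives (tags 0..S-1) from rank 1 (its left neighbor) and S sends with the
// same tags to rank 3 (its right neighbor); the MPI tag doubles as the stream
// index.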
#define USE_GPU
#define USE_CALLBACK
#define USE_BACKGROUND_ISEND
template <typename T>
inline void __checkCudaErrors__(T code, const char *func, const char *file, int line)
{
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n",
file, line, (unsigned int)code, func);
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
//keep #streams and #mpi ranks < 10 to keep the 1-digit encoding intact
#define S 8
#define N 100
__global__ void MyKernel(int myid, int *buffer)
{
buffer[threadIdx.x] += 10*myid;
}
struct CallbackInfo
{
int *send_buffer_d;
int device_id;
int dest;
int tag;
int myid;
MPI_Request send_request;
};
void CUDART_CB MyCallback(hipStream_t stream, hipError_t status, void *data){
CallbackInfo *info = (CallbackInfo*)data;
printf("Callback called: dest=%d, tag=%d\n", info->dest, info->tag);
hipDevice_t dev;
int result = hipCtxGetDevice(&dev);
printf("hipCtxGetDevice inside callback result=%d\n", result);
printf("Using device_id %d\n", info->device_id);
#ifdef USE_BACKGROUND_ISEND
auto future = std::async(std::launch::async, [&info]()
{
//need to set the device, otherwise I get a "illegal context" error
cudaCall(hipSetDevice(info->device_id));
printf("Hello from device %d tag %d\n", info->device_id, info->tag);
hipDevice_t dev;
int result = hipCtxGetDevice(&dev);
printf("hipCtxGetDevice inside callback inside background thread result=%d\n", result);
//MPI_Isend and MPI_send both deadlock here.
printf("Sending %d %p %d %d %d\n", info->myid, info->send_buffer_d, info->dest, N, info->tag);
MPI_Send(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD);
printf("Bye %d %d %d\n", info->myid, info->dest, info->tag);
});
#else
//This is what we want, but it fails with a hipErrorNotPermitted in hipCtxGetDevice()
MPI_Isend(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD, &info->send_request);
#endif
}
int main(int argc, char *argv[])
{
int myid, numprocs, left, right;
int recv_buffer[S][N], send_buffer[S][N];
CallbackInfo infos[S];
MPI_Request recv_request[S];
MPI_Status status;
const char* myid_c = std::getenv("SLURM_PROCID");
if(!myid_c) {
printf("SLURM_PROCID not set");
exit (EXIT_FAILURE);
}
const char* nprocs_c = std::getenv("SLURM_NPROCS");
if(!nprocs_c) {
printf( "SLURM_NPROCS not set");
exit (EXIT_FAILURE);
}
const char* g2g_c = std::getenv("G2G");
if(!g2g_c) {
printf( "G2G not set");
exit (EXIT_FAILURE);
}
myid = atoi(myid_c);
numprocs = atoi(nprocs_c);
int g2g = atoi(g2g_c);
assert(g2g >= 0 && g2g < 3);
int numgpus = numprocs;
if(g2g!=2)
numgpus = 1;
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
#ifdef USE_GPU
// cudaCall(hipGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
cudaCall(hipSetDevice(myid % numgpus));
#endif
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
int provided;
MPI_Init_thread(&argc,&argv, MPI_THREAD_MULTIPLE, &provided);
if (provided < MPI_THREAD_MULTIPLE)
{
printf("ERROR: The MPI library does not have full thread support\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
// MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
// MPI_Comm_rank(MPI_COMM_WORLD, &myid);
#ifdef USE_GPU
int *recv_buffer_d[S];
hipStream_t streams[S];
#endif
right = (myid + 1) % numprocs;
left = myid - 1;
if (left < 0)
left = numprocs - 1;
#ifdef USE_GPU
if(myid == 0) printf("\nUSING GPU!\n");
#ifdef USE_CALLBACK
if(myid==0) printf("USING CALLBACK!\n");
#ifdef USE_BACKGROUND_ISEND
if(myid==0) printf("With background MPI_ISEND\n\n");
#else
if(myid==0) printf("With direct MPI_ISEND\n\n");
#endif
#else
if(myid==0) printf("USING NO CALLBACK\n\n");
#endif
// cudaCall(hipGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
// cudaCall(hipSetDevice(myid % numgpus));
hipDevice_t dev;
int result = hipCtxGetDevice(&dev);
printf("hipCtxGetDevice outside callback result=%d; %d\n", result, myid);
//create streams and device buffers
for(int s = 0; s < S; s++)
{
cudaCall(hipStreamCreate(&streams[s]));
cudaCall(hipMalloc(&recv_buffer_d[s], N*sizeof(int)));
cudaCall(hipMalloc(&infos[s].send_buffer_d, N*sizeof(int)));
}
#else
if(myid==0) printf("\nUSING CPU!\n\n");
#endif
//initialise send buffer elements with the stream number
for(int s = 0; s < S; s++)
{
for(int i = 0; i < N; i++)
{
send_buffer[s][i] = s;
}
}
if(myid == 1)
{
printf("Rank %d send buffer:\n", myid);
printf("=========================================\n");
for(int s = 0; s < S; s++)
{
for(int i = 0; i < N; i++)
{
printf("%2d,", send_buffer[s][i]);
}
printf("\n");
}
}
for(int s = 0; s < S; s++)
{
//kick off S receives on device
#ifdef USE_GPU
MPI_Irecv(recv_buffer_d[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#else
MPI_Irecv(recv_buffer[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#endif
printf("IRECV %d from %d with tag %d\n", myid, left, s);
printf("SETTING %d %d %d \n", myid,numgpus, myid % numgpus);
infos[s].device_id = myid % numgpus;
infos[s].dest = right;
infos[s].tag = s;
infos[s].myid = myid;
#ifdef USE_GPU
//enqueue asyncronous memcpy and kernel
cudaCall(hipMemcpyAsync(infos[s].send_buffer_d, send_buffer[s], N*sizeof(int), hipMemcpyHostToDevice, streams[s]));
//the kernel will add 10*myid to the send_buffer so that the result is a number xy where x is id of the sender and y is the stream
hipLaunchKernelGGL(( MyKernel), dim3(1),dim3(N),0,streams[s], myid, infos[s].send_buffer_d);
printf("Kernel %d %d %d \n", myid, infos[s].device_id, numgpus);
#ifdef USE_CALLBACK
//enqueue the isend
cudaCall(hipStreamAddCallback(streams[s], MyCallback, &infos[s], 0));
#else
cudaCall(hipStreamSynchronize(streams[s]));
printf("Before ISend %d to %d, size %d with tag %d \n", myid, infos[s].dest, N, infos[s].tag);
MPI_Isend(infos[s].send_buffer_d, N, MPI_INT, infos[s].dest, infos[s].tag, MPI_COMM_WORLD, &infos[s].send_request);
#endif
printf("ISend %d \n", myid);
#else
for(int i = 0; i < N; i++)
{
send_buffer[s][i] += 10*myid;
}
MPI_Isend(send_buffer[s], N, MPI_INT, right, s, MPI_COMM_WORLD, &infos[s].send_request);
#endif
}
for(int s = 0; s < S; s++)
{
printf("Waiting %d \n", myid);
MPI_Wait(&recv_request[s], &status);
#ifndef USE_BACKGROUND_ISEND
MPI_Wait(&infos[s].send_request, &status);
#endif
#ifdef USE_GPU
cudaCall(hipMemcpyAsync(recv_buffer[s], recv_buffer_d[s], N*sizeof(int), hipMemcpyDeviceToHost, streams[s]));
#endif
}
#ifdef USE_GPU
cudaCall(hipDeviceSynchronize());
#endif
if(myid == 0)
{
printf("Rank %d got Result:\n", myid);
printf("=========================================\n");
for(int s = 0; s < S; s++)
{
for(int i = 0; i < N; i++)
{
//print the received values
printf("%2d,", recv_buffer[s][i]);
}
printf("\n");
}
}
MPI_Finalize();
printf("END %d \n", myid);
#ifdef USE_GPU
for(int s = 0; s < S; s++)
{
hipStreamDestroy(streams[s]);
cudaCall(hipFree(recv_buffer_d[s]));
cudaCall(hipFree(infos[s].send_buffer_d));
}
#endif
return 0;
}
| ae80fcdbd66af15f4f62e5601bb48a2dfe68e293.cu | #include "mpi.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include<cuda.h>
#include <future>
#include <cassert>
#define cudaCall(val) __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ )
/*
A test program that tries to call mpi_isend from within a cuda event callback.
Each rank starts S streams and sends them asynchronously to its right neighbor
in a ring. Each rank first initializes the send buffers, then issues an mpi_irecv for a device buffer,
then calls a memcpy h-to-d, followed by a kernel in a stream; the stream also enques a
callback that when executed starts the mpi_isend from a device buffer.
It then waits for everybody and copies the buffers back onto the host to print.
Author: Christoph Angerer
*/
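/*
Note: CUDA stream callbacks are documented as not being allowed to make CUDA API
calls, which matches the cuCtxGetDevice() failure observed inside MyCallback below.
As a rough sketch only (the helper name is illustrative, and whether MPI calls are
safe here still depends on the MPI implementation), CUDA 10+ offers
cudaLaunchHostFunc as the closer replacement for this pattern:

    void CUDART_CB hostSend(void *data)
    {
        CallbackInfo *info = (CallbackInfo*)data;
        MPI_Isend(info->send_buffer_d, N, MPI_INT, info->dest, info->tag,
                  MPI_COMM_WORLD, &info->send_request);
    }
    // enqueued after the kernel instead of cudaStreamAddCallback:
    cudaCall(cudaLaunchHostFunc(streams[s], hostSend, &infos[s]));

The "no CUDA API calls" restriction applies inside the host function as well.
*/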
#define USE_GPU
#define USE_CALLBACK
#define USE_BACKGROUND_ISEND
template <typename T>
inline void __checkCudaErrors__(T code, const char *func, const char *file, int line)
{
if (code) {
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n",
file, line, (unsigned int)code, func);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
//keep #streams and #mpi ranks < 10 to keep the 1-digit encoding intact
#define S 8
#define N 100
__global__ void MyKernel(int myid, int *buffer)
{
buffer[threadIdx.x] += 10*myid;
}
struct CallbackInfo
{
int *send_buffer_d;
int device_id;
int dest;
int tag;
int myid;
MPI_Request send_request;
};
void CUDART_CB MyCallback(cudaStream_t stream, cudaError_t status, void *data){
CallbackInfo *info = (CallbackInfo*)data;
printf("Callback called: dest=%d, tag=%d\n", info->dest, info->tag);
CUdevice dev;
int result = cuCtxGetDevice(&dev);
printf("cuCtxGetDevice inside callback result=%d\n", result);
printf("Using device_id %d\n", info->device_id);
#ifdef USE_BACKGROUND_ISEND
auto future = std::async(std::launch::async, [&info]()
{
//need to set the device, otherwise I get a "illegal context" error
cudaCall(cudaSetDevice(info->device_id));
printf("Hello from device %d tag %d\n", info->device_id, info->tag);
CUdevice dev;
int result = cuCtxGetDevice(&dev);
printf("cuCtxGetDevice inside callback inside background thread result=%d\n", result);
//MPI_Isend and MPI_send both deadlock here.
printf("Sending %d %p %d %d %d\n", info->myid, info->send_buffer_d, info->dest, N, info->tag);
MPI_Send(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD);
printf("Bye %d %d %d\n", info->myid, info->dest, info->tag);
});
#else
//This is what we want, but it fails with a CUDA_ERROR_NOT_PERMITTED in cuCtxtGetDevice()
MPI_Isend(info->send_buffer_d, N, MPI_INT, info->dest, info->tag, MPI_COMM_WORLD, &info->send_request);
#endif
}
int main(int argc, char *argv[])
{
int myid, numprocs, left, right;
int recv_buffer[S][N], send_buffer[S][N];
CallbackInfo infos[S];
MPI_Request recv_request[S];
MPI_Status status;
const char* myid_c = std::getenv("SLURM_PROCID");
if(!myid_c) {
printf("SLURM_PROCID not set");
exit (EXIT_FAILURE);
}
const char* nprocs_c = std::getenv("SLURM_NPROCS");
if(!nprocs_c) {
printf( "SLURM_NPROCS not set");
exit (EXIT_FAILURE);
}
const char* g2g_c = std::getenv("G2G");
if(!g2g_c) {
printf( "G2G not set");
exit (EXIT_FAILURE);
}
myid = atoi(myid_c);
numprocs = atoi(nprocs_c);
int g2g = atoi(g2g_c);
assert(g2g < 3 && g2g >= 0);
int numgpus = numprocs;
if(g2g!=2)
numgpus = 1;
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
#ifdef USE_GPU
// cudaCall(cudaGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
cudaCall(cudaSetDevice(myid % numgpus));
#endif
printf("NUMPROC %d %d\n", numgpus, myid % numgpus);
int provided;
MPI_Init_thread(&argc,&argv, MPI_THREAD_MULTIPLE, &provided);
if (provided < MPI_THREAD_MULTIPLE)
{
printf("ERROR: The MPI library does not have full thread support\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
// MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
// MPI_Comm_rank(MPI_COMM_WORLD, &myid);
#ifdef USE_GPU
int *recv_buffer_d[S];
cudaStream_t streams[S];
#endif
right = (myid + 1) % numprocs;
left = myid - 1;
if (left < 0)
left = numprocs - 1;
#ifdef USE_GPU
if(myid == 0) printf("\nUSING GPU!\n");
#ifdef USE_CALLBACK
if(myid==0) printf("USING CALLBACK!\n");
#ifdef USE_BACKGROUND_ISEND
if(myid==0) printf("With background MPI_ISEND\n\n");
#else
if(myid==0) printf("With direct MPI_ISEND\n\n");
#endif
#else
if(myid==0) printf("USING NO CALLBACK\n\n");
#endif
// cudaCall(cudaGetDeviceCount(&numgpus));
// printf("Rank %d uses device %d\n", myid, myid % numgpus);
// cudaCall(cudaSetDevice(myid % numgpus));
CUdevice dev;
int result = cuCtxGetDevice(&dev);
printf("cuCtxGetDevice outside callback result=%d; %d\n", result, myid);
//create streams and device buffers
for(int s = 0; s < S; s++)
{
cudaCall(cudaStreamCreate(&streams[s]));
cudaCall(cudaMalloc(&recv_buffer_d[s], N*sizeof(int)));
cudaCall(cudaMalloc(&infos[s].send_buffer_d, N*sizeof(int)));
}
#else
if(myid==0) printf("\nUSING CPU!\n\n");
#endif
//initialise send buffer elements with the stream number
for(int s = 0; s < S; s++)
{
for(int i = 0; i < N; i++)
{
send_buffer[s][i] = s;
}
}
if(myid == 1)
{
printf("Rank %d send buffer:\n", myid);
printf("=========================================\n");
for(int s = 0; s < S; s++)
{
for(int i = 0; i < N; i++)
{
printf("%2d,", send_buffer[s][i]);
}
printf("\n");
}
}
for(int s = 0; s < S; s++)
{
//kick off S receives on device
#ifdef USE_GPU
MPI_Irecv(recv_buffer_d[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#else
MPI_Irecv(recv_buffer[s], N, MPI_INT, left, s, MPI_COMM_WORLD, &recv_request[s]);
#endif
printf("IRECV %d from %d with tag %d\n", myid, left, s);
printf("SETTING %d %d %d \n", myid,numgpus, myid % numgpus);
infos[s].device_id = myid % numgpus;
infos[s].dest = right;
infos[s].tag = s;
infos[s].myid = myid;
#ifdef USE_GPU
//enqueue asynchronous memcpy and kernel
cudaCall(cudaMemcpyAsync(infos[s].send_buffer_d, send_buffer[s], N*sizeof(int), cudaMemcpyHostToDevice, streams[s]));
//the kernel will add 10*myid to the send_buffer so that the result is a number xy where x is id of the sender and y is the stream
MyKernel<<<1,N,0,streams[s]>>>(myid, infos[s].send_buffer_d);
printf("Kernel %d %d %d \n", myid, infos[s].device_id, numgpus);
#ifdef USE_CALLBACK
//enqueue the isend
cudaCall(cudaStreamAddCallback(streams[s], MyCallback, &infos[s], 0));
#else
cudaCall(cudaStreamSynchronize(streams[s]));
printf("Before ISend %d to %d, size %d with tag %d \n", myid, infos[s].dest, N, infos[s].tag);
MPI_Isend(infos[s].send_buffer_d, N, MPI_INT, infos[s].dest, infos[s].tag, MPI_COMM_WORLD, &infos[s].send_request);
#endif
printf("ISend %d \n", myid);
#else
for(int i = 0; i < N; i++)
{
send_buffer[s][i] += 10*myid;
}
MPI_Isend(send_buffer[s], N, MPI_INT, right, s, MPI_COMM_WORLD, &infos[s].send_request);
#endif
}
for(int s = 0; s < S; s++)
{
printf("Waiting %d \n", myid);
MPI_Wait(&recv_request[s], &status);
#ifndef USE_BACKGROUND_ISEND
MPI_Wait(&infos[s].send_request, &status);
#endif
#ifdef USE_GPU
cudaCall(cudaMemcpyAsync(recv_buffer[s], recv_buffer_d[s], N*sizeof(int), cudaMemcpyDeviceToHost, streams[s]));
#endif
}
#ifdef USE_GPU
cudaCall(cudaDeviceSynchronize());
#endif
if(myid == 0)
{
printf("Rank %d got Result:\n", myid);
printf("=========================================\n");
for(int s = 0; s < S; s++)
{
for(int i = 0; i < N; i++)
{
//print the received values
printf("%2d,", recv_buffer[s][i]);
}
printf("\n");
}
}
MPI_Finalize();
printf("END %d \n", myid);
#ifdef USE_GPU
for(int s = 0; s < S; s++)
{
cudaStreamDestroy(streams[s]);
cudaCall(cudaFree(recv_buffer_d[s]));
cudaCall(cudaFree(infos[s].send_buffer_d));
}
#endif
return 0;
}
|
36c24a13794d26cb6a5df5f2ca2a7bb1cab66c17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h> // for clock() / clock_t / CLOCKS_PER_SEC used below
#define Threads 1000
#define readBlockSize 10000000
/*
A little hackish once digits of pi exceed 67~ million. See lines 85-89.
For digits of pi lower than that, commenting out lines 86-89 should do. Leave
line 85 uncommented so printf and fprintf from lines 133-134 will remain functional.
Recommendation for future optimization: Memset inputString to contain all \0, then
check in kernel. If \0, return. Removes need for hack mentioned above.
*/
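/*
A rough sketch of the recommendation above (illustrative only, not wired into the
code below): clear the pinned host buffers before every fgets() so that any stale
bytes beyond a short final read stay '\0', which computeHistogram() already skips:

    memset(inputString1, 0, readBlockSize * sizeof(char));
    while (fgets(inputString1, readBlockSize, input) != NULL) {
        ... // copy / launch as below
        memset(inputString1, 0, readBlockSize * sizeof(char)); // reset for the next read
    }

The same applies to inputString2.
*/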
__global__ void computeHistogram(char*, int*);
__global__ void blankCall() {int i = 0; if (i == 0) {} };
int main(int argc, char *argv[]) {
clock_t start, end;
double exec_time;
start = clock();
if (argc != 2) {
printf("\nError: Number of arguments incorrect.\n"
"There can only be 1 additional argument, which is the name of the file.\n"
"Ex: ./Multi-Pi pi.txt\n"
"Program gracefully terminated.\n");
exit(0);
}
FILE *input = fopen(argv[1], "r+");
FILE *output = fopen("freq.dat", "w+");
if (input == NULL) {
printf("Error: %s could not be read!\n"
"Program gracefully terminated.\n", argv[1]);
exit(1);
}
if (output == NULL) {
printf("Error: freq.dat could not be created!\n"
"Program gracefully terminated.\n");
exit(1);
}
//------------------------------------------------------------------------------------//
hipSetDevice(0);
hipStream_t stream1;
hipStreamCreate(&stream1);
hipSetDevice(1);
hipStream_t stream2;
hipStreamCreate(&stream2);
//===================================================
char* inputString1; char* inputString2;
int* histogram;
int histogram2[10] = {0}, count = 1;
char *dev_inputString1;
char *dev_inputString2;
int *dev_histogram1;
int *dev_histogram2;
hipHostMalloc((void**) &inputString1, sizeof(char) * readBlockSize, hipHostMallocDefault);
hipHostMalloc((void**) &inputString2, sizeof(char) * readBlockSize, hipHostMallocDefault);
hipHostMalloc((void**) &histogram, sizeof(int) * 10, hipHostMallocDefault);
for (int i = 0; i < 10; i++) {
histogram[i] = 0;
histogram2[i] = 0;
}
hipSetDevice(0);
hipLaunchKernelGGL(( blankCall), dim3(1), dim3(1), 0, 0, );
hipMalloc((void**)&dev_inputString1, sizeof(char) * readBlockSize);
hipMalloc((void**)&dev_histogram1, sizeof(int) * 10);
hipMemcpy(dev_histogram1, histogram, 10 * sizeof(int), hipMemcpyHostToDevice);
hipSetDevice(1);
hipLaunchKernelGGL(( blankCall), dim3(1), dim3(1), 0, 0, );
hipMalloc((void**)&dev_inputString2, sizeof(char) * readBlockSize);
hipMalloc((void**)&dev_histogram2, sizeof(int) * 10);
hipMemcpy(dev_histogram2, histogram, 10 * sizeof(int), hipMemcpyHostToDevice);
int tmp[12] = {0};
fgets(inputString1, 12, input); //12: fgets reads at most 11 characters plus the terminating '\0'; those 11 characters are tallied on the host below
for (int i = 0; i < 11; i++) {
tmp[inputString1[i] - '0']++;
//printf("%i ", inputString1[i] - '0');
}
// for (int i = 0; i < 5; i++) {
// printf("Run %i\n", i+1);
// fgets(inputString1, readBlockSize, input);
// hipSetDevice(0);
// hipMemcpyAsync(dev_inputString1, inputString1, readBlockSize * sizeof(char), hipMemcpyHostToDevice, stream1);
// hipLaunchKernelGGL(( computeHistogram), dim3((int)ceil(readBlockSize / Threads) + 1), dim3(Threads), 0, stream1, dev_inputString1, dev_histogram1);
// fgets(inputString2, readBlockSize, input);
// hipSetDevice(1);
// hipMemcpyAsync(dev_inputString2, inputString2, readBlockSize * sizeof(char), hipMemcpyHostToDevice, stream2);
// hipLaunchKernelGGL(( computeHistogram), dim3((int)ceil(readBlockSize / Threads) + 1), dim3(Threads), 0, stream2, dev_inputString2, dev_histogram2);
// }
while(fgets(inputString1, readBlockSize, input) != NULL) {
hipSetDevice(0);
hipMemcpyAsync(dev_inputString1, inputString1, readBlockSize * sizeof(char), hipMemcpyHostToDevice, stream1);
hipLaunchKernelGGL(( computeHistogram), dim3((int)ceil(readBlockSize / Threads) + 1), dim3(Threads), 0, stream1, dev_inputString1, dev_histogram1);
if (fgets(inputString2, readBlockSize, input) != NULL) {
hipSetDevice(1);
hipMemcpyAsync(dev_inputString2, inputString2, readBlockSize * sizeof(char), hipMemcpyHostToDevice, stream2);
hipLaunchKernelGGL(( computeHistogram), dim3((int)ceil(readBlockSize / Threads) + 1), dim3(Threads), 0, stream2, dev_inputString2, dev_histogram2);
}
hipDeviceSynchronize();
printf("GPUs Synchronized (%i)\n", count);
count++;
}
//This can be made async too
hipSetDevice(0);
hipMemcpy(histogram, dev_histogram1, 10 * sizeof(int), hipMemcpyDeviceToHost);
hipSetDevice(1);
hipMemcpy(histogram2, dev_histogram2, 10 * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < 10; i++) {
int total = histogram[i] + histogram2[i] + tmp[i];
printf("[%i]: %i\tNormalized: %f\n", i, total, (double)total/(double)100000001);
fprintf(output, "%i\t%f\n", i, (double)total/(double)100000001);
}
//===================================================
//Stopping the timer
end = clock();
exec_time = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\nExecution Time: %f\n", exec_time);
//I got your back, Dr. Cho, together we save the world
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
fclose(input);
fclose(output);
}
__global__ void computeHistogram(char* inputArr, int* histArr) {
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
if (globalID >= readBlockSize) {
//printf("Returning for ID: %i", globalID);
return;
}
//Return if '.', or EOL
if (inputArr[globalID] == '\0')
return;
atomicAdd(&histArr[inputArr[globalID] - '0'], 1);
}
| 36c24a13794d26cb6a5df5f2ca2a7bb1cab66c17.cu | #include <stdio.h>
#include <stdlib.h>
#define Threads 1000
#define readBlockSize 10000000
/*
A little hackish once digits of pi exceed 67~ million. See lines 85-89.
For digits of pi lower than that, commenting out lines 86-89 should do. Leave
line 85 uncommented so printf and fprintf from lines 133-134 will remain functional.
Recommendation for future optimization: Memset inputString to contain all \0, then
check in kernel. If \0, return. Removes need for hack mentioned above.
*/
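/*
A rough sketch of the recommendation above (illustrative only, not wired into the
code below): clear the pinned host buffers before every fgets() so that any stale
bytes beyond a short final read stay '\0', which computeHistogram() already skips:

    memset(inputString1, 0, readBlockSize * sizeof(char));
    while (fgets(inputString1, readBlockSize, input) != NULL) {
        ... // copy / launch as below
        memset(inputString1, 0, readBlockSize * sizeof(char)); // reset for the next read
    }

The same applies to inputString2.
*/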
__global__ void computeHistogram(char*, int*);
__global__ void blankCall() {int i = 0; if (i == 0) {} };
int main(int argc, char *argv[]) {
clock_t start, end;
double exec_time;
start = clock();
if (argc != 2) {
printf("\nError: Number of arguments incorrect.\n"
"There can only be 1 additional argument, which is the name of the file.\n"
"Ex: ./Multi-Pi pi.txt\n"
"Program gracefully terminated.\n");
exit(0);
}
FILE *input = fopen(argv[1], "r+");
FILE *output = fopen("freq.dat", "w+");
if (input == NULL) {
printf("Error: %s could not be read!\n"
"Program gracefully terminated.\n", argv[1]);
exit(1);
}
if (output == NULL) {
printf("Error: freq.dat could not be created!\n"
"Program gracefully terminated.\n");
exit(1);
}
//------------------------------------------------------------------------------------//
cudaSetDevice(0);
cudaStream_t stream1;
cudaStreamCreate(&stream1);
cudaSetDevice(1);
cudaStream_t stream2;
cudaStreamCreate(&stream2);
//===================================================
char* inputString1; char* inputString2;
int* histogram;
int histogram2[10] = {0}, count = 1;
char *dev_inputString1;
char *dev_inputString2;
int *dev_histogram1;
int *dev_histogram2;
cudaHostAlloc((void**) &inputString1, sizeof(char) * readBlockSize, cudaHostAllocDefault);
cudaHostAlloc((void**) &inputString2, sizeof(char) * readBlockSize, cudaHostAllocDefault);
cudaHostAlloc((void**) &histogram, sizeof(int) * 10, cudaHostAllocDefault);
for (int i = 0; i < 10; i++) {
histogram[i] = 0;
histogram2[i] = 0;
}
cudaSetDevice(0);
blankCall<<<1, 1>>>();
cudaMalloc((void**)&dev_inputString1, sizeof(char) * readBlockSize);
cudaMalloc((void**)&dev_histogram1, sizeof(int) * 10);
cudaMemcpy(dev_histogram1, histogram, 10 * sizeof(int), cudaMemcpyHostToDevice);
cudaSetDevice(1);
blankCall<<<1, 1>>>();
cudaMalloc((void**)&dev_inputString2, sizeof(char) * readBlockSize);
cudaMalloc((void**)&dev_histogram2, sizeof(int) * 10);
cudaMemcpy(dev_histogram2, histogram, 10 * sizeof(int), cudaMemcpyHostToDevice);
int tmp[12] = {0};
fgets(inputString1, 12, input); //12: fgets reads at most 11 characters plus the terminating '\0'; those 11 characters are tallied on the host below
for (int i = 0; i < 11; i++) {
tmp[inputString1[i] - '0']++;
//printf("%i ", inputString1[i] - '0');
}
// for (int i = 0; i < 5; i++) {
// printf("Run %i\n", i+1);
// fgets(inputString1, readBlockSize, input);
// cudaSetDevice(0);
// cudaMemcpyAsync(dev_inputString1, inputString1, readBlockSize * sizeof(char), cudaMemcpyHostToDevice, stream1);
// computeHistogram<<<(int)ceil(readBlockSize / Threads) + 1, Threads, 0, stream1>>>(dev_inputString1, dev_histogram1);
// fgets(inputString2, readBlockSize, input);
// cudaSetDevice(1);
// cudaMemcpyAsync(dev_inputString2, inputString2, readBlockSize * sizeof(char), cudaMemcpyHostToDevice, stream2);
// computeHistogram<<<(int)ceil(readBlockSize / Threads) + 1, Threads, 0, stream2>>>(dev_inputString2, dev_histogram2);
// }
while(fgets(inputString1, readBlockSize, input) != NULL) {
cudaSetDevice(0);
cudaMemcpyAsync(dev_inputString1, inputString1, readBlockSize * sizeof(char), cudaMemcpyHostToDevice, stream1);
computeHistogram<<<(int)ceil(readBlockSize / Threads) + 1, Threads, 0, stream1>>>(dev_inputString1, dev_histogram1);
if (fgets(inputString2, readBlockSize, input) != NULL) {
cudaSetDevice(1);
cudaMemcpyAsync(dev_inputString2, inputString2, readBlockSize * sizeof(char), cudaMemcpyHostToDevice, stream2);
computeHistogram<<<(int)ceil(readBlockSize / Threads) + 1, Threads, 0, stream2>>>(dev_inputString2, dev_histogram2);
}
cudaDeviceSynchronize();
printf("GPUs Synchronized (%i)\n", count);
count++;
}
//This can be made async too
cudaSetDevice(0);
cudaMemcpy(histogram, dev_histogram1, 10 * sizeof(int), cudaMemcpyDeviceToHost);
cudaSetDevice(1);
cudaMemcpy(histogram2, dev_histogram2, 10 * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < 10; i++) {
int total = histogram[i] + histogram2[i] + tmp[i];
printf("[%i]: %i\tNormalized: %f\n", i, total, (double)total/(double)100000001);
fprintf(output, "%i\t%f\n", i, (double)total/(double)100000001);
}
//===================================================
//Stopping the timer
end = clock();
exec_time = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\nExecution Time: %f\n", exec_time);
//I got your back, Dr. Cho, together we save the world
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
fclose(input);
fclose(output);
}
__global__ void computeHistogram(char* inputArr, int* histArr) {
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
if (globalID >= readBlockSize) {
//printf("Returning for ID: %i", globalID);
return;
}
//Return if '.', or EOL
if (inputArr[globalID] == '\0')
return;
atomicAdd(&histArr[inputArr[globalID] - '0'], 1);
}
|
286c77fc7f0f02ea0c8e65f481ba0de23c28159d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ void EstimateParForSubsample(float* subImageDefs, bool safeBounds, int inputWidth, int inputHeight, int2 & subImg, int & diameterPix)
{
diameterPix = (int)( fminf( (float)inputWidth,(float)inputHeight ) * subImageDefs[2] ); // <0,1>
subImg.x = (int)((float)inputWidth * (subImageDefs[0] + 1) * 0.5f) ;//- diameterPix / 2;
subImg.y = (int)((float)inputHeight * (subImageDefs[1] + 1) * 0.5f);// - diameterPix / 2;
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
if (safeBounds)
{
subImg.x = max(subImg.x, 1);
subImg.y = max(subImg.y, 1);
subImg.x = min(subImg.x, inputWidth - diameterPix - 1);
subImg.y = min(subImg.y, inputHeight - diameterPix - 1);
}
}
__global__ void RetinaTransform_FillRetinaAtomic (float * subImageDefs, float* input, int inputWidth, int inputHeight, float* output,int outputDataSize, float* retinaMask, int retinaDataSize, int retinaMaskColHint, float* retinaDataInserted)
{
int id_pxl = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int2 subImg;
int diameterPix;
bool safeBounds = 0;
int x = id_pxl % inputWidth;
int y = id_pxl/inputWidth;
EstimateParForSubsample( subImageDefs, safeBounds, inputWidth, inputHeight, subImg, diameterPix );
if (id_pxl<inputWidth*inputHeight)
{
float minDist = 999999.9; // TODO: should be written better, e.g. start from FLT_MAX
int minIdx = 1;
for (int id_retinaPoint=0 ; id_retinaPoint<retinaDataSize ; id_retinaPoint++)
{
float x_mask = (retinaMask[id_retinaPoint*retinaMaskColHint]*diameterPix);
float y_mask = (retinaMask[id_retinaPoint*retinaMaskColHint+1]*diameterPix);
x_mask += subImg.x;
y_mask += subImg.y;
float dist = (x-x_mask)*(x-x_mask) + (y-y_mask)*(y-y_mask);
if (dist<minDist)
{
minDist = dist;
minIdx = id_retinaPoint;
}
}
atomicAdd(output + minIdx , input[id_pxl]);
atomicAdd(retinaDataInserted + minIdx , 1);
}
} | 286c77fc7f0f02ea0c8e65f481ba0de23c28159d.cu | #include "includes.h"
__device__ void EstimateParForSubsample(float* subImageDefs, bool safeBounds, int inputWidth, int inputHeight, int2 & subImg, int & diameterPix)
{
diameterPix = (int)( fminf( (float)inputWidth,(float)inputHeight ) * subImageDefs[2] ); // <0,1>
subImg.x = (int)((float)inputWidth * (subImageDefs[0] + 1) * 0.5f) ;//- diameterPix / 2;
subImg.y = (int)((float)inputHeight * (subImageDefs[1] + 1) * 0.5f);// - diameterPix / 2;
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
if (safeBounds)
{
subImg.x = max(subImg.x, 1);
subImg.y = max(subImg.y, 1);
subImg.x = min(subImg.x, inputWidth - diameterPix - 1);
subImg.y = min(subImg.y, inputHeight - diameterPix - 1);
}
}
__global__ void RetinaTransform_FillRetinaAtomic (float * subImageDefs, float* input, int inputWidth, int inputHeight, float* output,int outputDataSize, float* retinaMask, int retinaDataSize, int retinaMaskColHint, float* retinaDataInserted)
{
int id_pxl = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int2 subImg;
int diameterPix;
bool safeBounds = 0;
int x = id_pxl % inputWidth;
int y = id_pxl/inputWidth;
EstimateParForSubsample( subImageDefs, safeBounds, inputWidth, inputHeight, subImg, diameterPix );
if (id_pxl<inputWidth*inputHeight)
{
float minDist = 999999.9; // TODO: should be written better, e.g. start from FLT_MAX
int minIdx = 1;
for (int id_retinaPoint=0 ; id_retinaPoint<retinaDataSize ; id_retinaPoint++)
{
float x_mask = (retinaMask[id_retinaPoint*retinaMaskColHint]*diameterPix);
float y_mask = (retinaMask[id_retinaPoint*retinaMaskColHint+1]*diameterPix);
x_mask += subImg.x;
y_mask += subImg.y;
float dist = (x-x_mask)*(x-x_mask) + (y-y_mask)*(y-y_mask);
if (dist<minDist)
{
minDist = dist;
minIdx = id_retinaPoint;
}
}
atomicAdd(output + minIdx , input[id_pxl]);
atomicAdd(retinaDataInserted + minIdx , 1);
}
} |
d8bcb20c5e38512de50dd1da44a0902ca07e61fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
*/
#include "magma_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
__global__
void magma_zlarfg_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
double xnorm;
magmaDoubleComplex dxi;
#ifdef REAL
if ( n <= 1 )
#else
if ( n <= 0 )
#endif
{
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaDoubleComplex alpha = *dx0;
#ifdef REAL
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_Z_REAL(alpha);
double alphai = MAGMA_Z_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_Z_MAKE(beta, 0.);
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
}
else {
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
  [ dx  ]   [  0   ]
with |beta| = norm( [dx0, dx] ) = dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
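/*
Summarising what the kernel above computes (real case):
    beta = -sign(alpha) * sqrt( alpha^2 + ||dx||^2 )
    tau  = (beta - alpha) / beta
    v    = [ 1 ; dx / (alpha - beta) ]
so that (I - tau*v*v^T) * [alpha ; dx] = [beta ; 0]. In the complex case beta is
kept real, tau = ((beta - Re(alpha)) - i*Im(alpha)) / beta, and dx is again scaled
by 1 / (alpha - beta).
*/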
extern "C" void
magma_zlarfg_gpu(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dAkk,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dznrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dznrm2_cols(n-1, 1, dx0+1, n, dxnorm, queue);
hipLaunchKernelGGL(( magma_zlarfg_gpu_kernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
n, dx0, dx, dtau, dxnorm, dAkk);
}
| d8bcb20c5e38512de50dd1da44a0902ca07e61fa.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
*/
#include "magma_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
__global__
void magma_zlarfg_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
double xnorm;
magmaDoubleComplex dxi;
#ifdef REAL
if ( n <= 1 )
#else
if ( n <= 0 )
#endif
{
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaDoubleComplex alpha = *dx0;
#ifdef REAL
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_Z_REAL(alpha);
double alphai = MAGMA_Z_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_Z_MAKE(beta, 0.);
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
}
else {
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
  [ dx  ]   [  0   ]
with |beta| = norm( [dx0, dx] ) = dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
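/*
Summarising what the kernel above computes (real case):
    beta = -sign(alpha) * sqrt( alpha^2 + ||dx||^2 )
    tau  = (beta - alpha) / beta
    v    = [ 1 ; dx / (alpha - beta) ]
so that (I - tau*v*v^T) * [alpha ; dx] = [beta ; 0]. In the complex case beta is
kept real, tau = ((beta - Re(alpha)) - i*Im(alpha)) / beta, and dx is again scaled
by 1 / (alpha - beta).
*/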
extern "C" void
magma_zlarfg_gpu(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dAkk,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dznrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dznrm2_cols(n-1, 1, dx0+1, n, dxnorm, queue);
magma_zlarfg_gpu_kernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
16518f5d1fbb0426ff4195dd316278a92da26203.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialConvolutionLocal.hip"
#else
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
THCTensor *weight, THCTensor *bias,
int kH, int kW, int dH,
int dW, int padH, int padW,
int64_t inputHeight, int64_t inputWidth,
int64_t outputHeight, int64_t outputWidth) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
int64_t nInputPlane = weight->size(2) / (kH * kW);
int64_t nOutputPlane = weight->size(1);
if (bias != NULL) {
THCUNN_check_dim_size(state, bias, 3, 0, nOutputPlane);
THCUNN_check_dim_size(state, bias, 3, 1, outputHeight);
THCUNN_check_dim_size(state, bias, 3, 2, outputWidth);
}
THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
static THCTensor* THNN_(view_weight_local)(
THCState *state,
THCTensor *_weight)
{
THCTensor *weight = THCTensor_(newContiguous)(state, _weight);
AT_CHECK(!weight->is_empty() && (weight->dim() == 3 || weight->dim() == 6), 4,
"weight tensor should be (non-empty) 3D or 6D - got size: ", weight->sizes());
if (weight->dim() == 6) {
int64_t s1 = weight->size(0) * weight->size(1);
int64_t s2 = weight->size(2);
int64_t s3 = weight->size(3) * weight->size(4) * weight->size(5);
THCTensor *old_weight = weight;
weight = THCTensor_(newWithStorage3d)(state,
THTensor_getStoragePtr(weight),
weight->storage_offset(),
s1, -1, s2, -1, s3, -1);
THCTensor_(free)(state, old_weight);
}
return weight;
}
void THNN_(SpatialConvolutionLocal_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
THCTensor *finput,
THCTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight)
{
THCUNN_assertSameGPU(state, 5, input, output, weight,
bias, finput);
weight = THNN_(view_weight_local)(state, weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THCTensor_(newContiguous)(state, input);
int64_t nInputPlane = THCTensor_(size)(state,weight,2)/(kW*kH);
int64_t nOutputPlane = THCTensor_(size)(state,weight,1);
int batch = 1;
if (input->dim() == 3) {
// Force batch
batch = 0;
THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth);
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Augment the input
THCTensor_(resize3d)(state, finput, batchSize, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *finput_n = THCTensor_(new)(state);
THCTensor *output_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
THCTensor *finput3d, *output3d;
THCTensor *wslice = THCTensor_(new)(state);
THCTensor *islice = THCTensor_(new)(state);
THCTensor *oslice = THCTensor_(new)(state);
// Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, finput_n, finput, 0, elt);
THCTensor_(select)(state, output_n, output, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, finput_n)
);
output3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(output_n), output_n->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
finput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(finput_n), finput_n->storage_offset(),
outputHeight*outputWidth, 1,
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
THCTensor_(copy)(state, output_n, bias);
// weight: oH*oW x nOutputPlane x nInputPlane*kH*kW
// finput3d: oH*oW x nInputPlane*kH*kW x 1
THCTensor_(baddbmm)(state, output3d, ScalarConvert<int, scalar_t>::to(1),
output3d, ScalarConvert<int, scalar_t>::to(1),
weight, finput3d);
// output3d: oH*oW x nOutputPlane x 1
THCTensor_(free)(state, output3d);
THCTensor_(free)(state, finput3d);
THCTensor_(free)(state, wslice);
THCTensor_(free)(state, islice);
THCTensor_(free)(state, oslice);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, finput_n);
THCTensor_(free)(state, output_n);
// Resize output
if (batch == 0) {
THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
}
void THNN_(SpatialConvolutionLocal_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *finput,
THCTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight)
{
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
fgradInput, gradInput);
weight = THNN_(view_weight_local)(state, weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputPlane = THCTensor_(size)(state,weight,2)/(kW*kH);
int64_t nOutputPlane = THCTensor_(size)(state,weight,1);
int batch = 1;
if (input->dim() == 3) {
// Force batch
batch = 0;
THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize4d)(state, gradOutput, 1, nOutputPlane, outputHeight, outputWidth);
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCTensor_(resize3d)(state, fgradInput, batchSize, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *gradInput_n = THCTensor_(new)(state);
THCTensor *fgradInput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
THCTensor *tweight = THCTensor_(new)(state);
THCTensor_(transpose)(state, tweight, weight, 1, 2);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
THCTensor *gradOutput3d, *fgradInput3d;
THCTensor *wslice = THCTensor_(new)(state);
THCTensor *gislice = THCTensor_(new)(state);
THCTensor *goslice = THCTensor_(new)(state);
// Matrix multiply per sample:
THCTensor_(select)(state, gradInput_n, gradInput, 0, elt);
THCTensor_(select)(state, fgradInput_n, fgradInput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
gradOutput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(gradOutput_n), gradOutput_n->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
fgradInput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(fgradInput_n), fgradInput_n->storage_offset(),
outputHeight*outputWidth, 1,
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
// weight: oH*oW x nInputPlane*kH*kW x nOutputPlane
// gradOutput3d: oH*oW x nOutputPlane x 1
THCTensor_(baddbmm)(state, fgradInput3d,
ScalarConvert<int, scalar_t>::to(0),
fgradInput3d, ScalarConvert<int, scalar_t>::to(1),
tweight, gradOutput3d);
// fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
// Unpack columns back into input:
col2im<scalar_t, accreal>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, fgradInput_n),
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, gradInput_n)
);
THCTensor_(free)(state, gradOutput3d);
THCTensor_(free)(state, fgradInput3d);
THCTensor_(free)(state, wslice);
THCTensor_(free)(state, gislice);
THCTensor_(free)(state, goslice);
}
// Free
THCTensor_(free)(state, gradInput_n);
THCTensor_(free)(state, fgradInput_n);
THCTensor_(free)(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, tweight);
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, weight);
}
void THNN_(SpatialConvolutionLocal_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *finput,
THCTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight,
accreal scale_)
{
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight,
gradBias, finput);
THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous");
THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous");
gradWeight = THNN_(view_weight_local)(state, gradWeight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputPlane = THCTensor_(size)(state,gradWeight,2)/(kW*kH);
int64_t nOutputPlane = THCTensor_(size)(state,gradWeight,1);
int batch = 1;
if (input->dim() == 3) {
// Force batch
batch = 0;
THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize4d)(state, gradOutput, 1, nOutputPlane, outputHeight, outputWidth);
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *finput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
THCTensor *gradOutput3d, *finput3d;
THCTensor *gwslice = THCTensor_(new)(state);
THCTensor *islice = THCTensor_(new)(state);
THCTensor *goslice = THCTensor_(new)(state);
// Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, finput_n, finput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
gradOutput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(gradOutput_n), gradOutput_n->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
finput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(finput_n), finput_n->storage_offset(),
outputHeight*outputWidth, 1,
1, kW*kH*nInputPlane*outputHeight*outputWidth,
kW*kH*nInputPlane, outputHeight*outputWidth);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, finput_n)
);
// gradOutput3d: oH*oW x nOutputPlane x 1
// finput3d: oH*oW x 1 x kW*kH*nInputPlane
THCTensor_(baddbmm)(state, gradWeight, ScalarConvert<int, scalar_t>::to(1),
gradWeight, scale, gradOutput3d, finput3d);
// gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane
THCTensor_(cadd)(state, gradBias, gradBias, scale, gradOutput_n);
THCTensor_(free)(state, gradOutput3d);
THCTensor_(free)(state, finput3d);
THCTensor_(free)(state, gwslice);
THCTensor_(free)(state, goslice);
THCTensor_(free)(state, islice);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, finput_n);
THCTensor_(free)(state, gradOutput_n);
// Resize
if (batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, gradWeight);
}
#endif
| 16518f5d1fbb0426ff4195dd316278a92da26203.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialConvolutionLocal.cu"
#else
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
THCTensor *weight, THCTensor *bias,
int kH, int kW, int dH,
int dW, int padH, int padW,
int64_t inputHeight, int64_t inputWidth,
int64_t outputHeight, int64_t outputWidth) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
int64_t nInputPlane = weight->size(2) / (kH * kW);
int64_t nOutputPlane = weight->size(1);
if (bias != NULL) {
THCUNN_check_dim_size(state, bias, 3, 0, nOutputPlane);
THCUNN_check_dim_size(state, bias, 3, 1, outputHeight);
THCUNN_check_dim_size(state, bias, 3, 2, outputWidth);
}
THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth);
}
}
static THCTensor* THNN_(view_weight_local)(
THCState *state,
THCTensor *_weight)
{
THCTensor *weight = THCTensor_(newContiguous)(state, _weight);
AT_CHECK(!weight->is_empty() && (weight->dim() == 3 || weight->dim() == 6), 4,
"weight tensor should be (non-empty) 3D or 6D - got size: ", weight->sizes());
if (weight->dim() == 6) {
int64_t s1 = weight->size(0) * weight->size(1);
int64_t s2 = weight->size(2);
int64_t s3 = weight->size(3) * weight->size(4) * weight->size(5);
THCTensor *old_weight = weight;
weight = THCTensor_(newWithStorage3d)(state,
THTensor_getStoragePtr(weight),
weight->storage_offset(),
s1, -1, s2, -1, s3, -1);
THCTensor_(free)(state, old_weight);
}
return weight;
}
void THNN_(SpatialConvolutionLocal_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
THCTensor *finput,
THCTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight)
{
THCUNN_assertSameGPU(state, 5, input, output, weight,
bias, finput);
weight = THNN_(view_weight_local)(state, weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THCTensor_(newContiguous)(state, input);
int64_t nInputPlane = THCTensor_(size)(state,weight,2)/(kW*kH);
int64_t nOutputPlane = THCTensor_(size)(state,weight,1);
int batch = 1;
if (input->dim() == 3) {
// Force batch
batch = 0;
THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth);
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Augment the input
THCTensor_(resize3d)(state, finput, batchSize, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *finput_n = THCTensor_(new)(state);
THCTensor *output_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
THCTensor *finput3d, *output3d;
THCTensor *wslice = THCTensor_(new)(state);
THCTensor *islice = THCTensor_(new)(state);
THCTensor *oslice = THCTensor_(new)(state);
// Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, finput_n, finput, 0, elt);
THCTensor_(select)(state, output_n, output, 0, elt);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, finput_n)
);
output3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(output_n), output_n->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
finput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(finput_n), finput_n->storage_offset(),
outputHeight*outputWidth, 1,
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
THCTensor_(copy)(state, output_n, bias);
// weight: oH*oW x nOutputPlane x nInputPlane*kH*kW
// finput3d: oH*oW x nInputPlane*kH*kW x 1
THCTensor_(baddbmm)(state, output3d, ScalarConvert<int, scalar_t>::to(1),
output3d, ScalarConvert<int, scalar_t>::to(1),
weight, finput3d);
// output3d: oH*oW x nOutputPlane x 1
THCTensor_(free)(state, output3d);
THCTensor_(free)(state, finput3d);
THCTensor_(free)(state, wslice);
THCTensor_(free)(state, islice);
THCTensor_(free)(state, oslice);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, finput_n);
THCTensor_(free)(state, output_n);
// Resize output
if (batch == 0) {
THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, weight);
}
void THNN_(SpatialConvolutionLocal_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *finput,
THCTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight)
{
THCUNN_assertSameGPU(state, 5, input, gradOutput, weight,
fgradInput, gradInput);
weight = THNN_(view_weight_local)(state, weight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputPlane = THCTensor_(size)(state,weight,2)/(kW*kH);
int64_t nOutputPlane = THCTensor_(size)(state,weight,1);
int batch = 1;
if (input->dim() == 3) {
// Force batch
batch = 0;
THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize4d)(state, gradOutput, 1, nOutputPlane, outputHeight, outputWidth);
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Resize output
THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCTensor_(resize3d)(state, fgradInput, batchSize, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCTensor *gradInput_n = THCTensor_(new)(state);
THCTensor *fgradInput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
THCTensor *tweight = THCTensor_(new)(state);
THCTensor_(transpose)(state, tweight, weight, 1, 2);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
THCTensor *gradOutput3d, *fgradInput3d;
THCTensor *wslice = THCTensor_(new)(state);
THCTensor *gislice = THCTensor_(new)(state);
THCTensor *goslice = THCTensor_(new)(state);
// Matrix multiply per sample:
THCTensor_(select)(state, gradInput_n, gradInput, 0, elt);
THCTensor_(select)(state, fgradInput_n, fgradInput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
gradOutput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(gradOutput_n), gradOutput_n->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
fgradInput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(fgradInput_n), fgradInput_n->storage_offset(),
outputHeight*outputWidth, 1,
kW*kH*nInputPlane, outputHeight*outputWidth,
1, kW*kH*nInputPlane*outputHeight*outputWidth);
// weight: oH*oW x nInputPlane*kH*kW x nOutputPlane
// gradOutput3d: oH*oW x nOutputPlane x 1
THCTensor_(baddbmm)(state, fgradInput3d,
ScalarConvert<int, scalar_t>::to(0),
fgradInput3d, ScalarConvert<int, scalar_t>::to(1),
tweight, gradOutput3d);
// fgradInput3d: oH*oW x nInputPlane*kH*kW x 1
// Unpack columns back into input:
col2im<scalar_t, accreal>(
THCState_getCurrentStream(state),
THCTensor_(data)(state, fgradInput_n),
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, gradInput_n)
);
THCTensor_(free)(state, gradOutput3d);
THCTensor_(free)(state, fgradInput3d);
THCTensor_(free)(state, wslice);
THCTensor_(free)(state, gislice);
THCTensor_(free)(state, goslice);
}
// Free
THCTensor_(free)(state, gradInput_n);
THCTensor_(free)(state, fgradInput_n);
THCTensor_(free)(state, gradOutput_n);
// Resize output
if (batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, tweight);
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, weight);
}
void THNN_(SpatialConvolutionLocal_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *finput,
THCTensor *fgradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int64_t inputWidth, int64_t inputHeight,
int64_t outputWidth, int64_t outputHeight,
accreal scale_)
{
scalar_t scale = ScalarConvert<accreal, scalar_t>::to(scale_);
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight,
gradBias, finput);
THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous");
THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous");
gradWeight = THNN_(view_weight_local)(state, gradWeight);
THNN_(SpatialConvolutionLocal_shapeCheck)
(state, input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
inputHeight, inputWidth, outputHeight, outputWidth);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputPlane = THCTensor_(size)(state,gradWeight,2)/(kW*kH);
int64_t nOutputPlane = THCTensor_(size)(state,gradWeight,1);
int batch = 1;
if (input->dim() == 3) {
// Force batch
batch = 0;
THCTensor_(resize4d)(state, input, 1, nInputPlane, inputHeight, inputWidth);
THCTensor_(resize4d)(state, gradOutput, 1, nOutputPlane, outputHeight, outputWidth);
}
// Batch size + input planes
int64_t batchSize = input->size(0);
// Helpers
THCTensor *input_n = THCTensor_(new)(state);
THCTensor *finput_n = THCTensor_(new)(state);
THCTensor *gradOutput_n = THCTensor_(new)(state);
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
THCTensor *gradOutput3d, *finput3d;
THCTensor *gwslice = THCTensor_(new)(state);
THCTensor *islice = THCTensor_(new)(state);
THCTensor *goslice = THCTensor_(new)(state);
// Matrix multiply per output:
THCTensor_(select)(state, input_n, input, 0, elt);
THCTensor_(select)(state, finput_n, finput, 0, elt);
THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt);
gradOutput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(gradOutput_n), gradOutput_n->storage_offset(),
outputHeight*outputWidth, 1,
nOutputPlane, outputHeight*outputWidth,
1, nOutputPlane*outputHeight*outputWidth);
finput3d = THCTensor_(newWithStorage3d)(state, THTensor_getStoragePtr(finput_n), finput_n->storage_offset(),
outputHeight*outputWidth, 1,
1, kW*kH*nInputPlane*outputHeight*outputWidth,
kW*kH*nInputPlane, outputHeight*outputWidth);
// Extract columns:
im2col(
THCState_getCurrentStream(state),
THCTensor_(data)(state, input_n),
nInputPlane, inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW, padH, padW, dH, dW,
1, 1, THCTensor_(data)(state, finput_n)
);
// gradOutput3d: oH*oW x nOutputPlane x 1
// finput3d: oH*oW x 1 x kW*kH*nInputPlane
THCTensor_(baddbmm)(state, gradWeight, ScalarConvert<int, scalar_t>::to(1),
gradWeight, scale, gradOutput3d, finput3d);
// gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane
THCTensor_(cadd)(state, gradBias, gradBias, scale, gradOutput_n);
THCTensor_(free)(state, gradOutput3d);
THCTensor_(free)(state, finput3d);
THCTensor_(free)(state, gwslice);
THCTensor_(free)(state, goslice);
THCTensor_(free)(state, islice);
}
// Free
THCTensor_(free)(state, input_n);
THCTensor_(free)(state, finput_n);
THCTensor_(free)(state, gradOutput_n);
// Resize
if (batch == 0) {
THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth);
THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
THCTensor_(free)(state, gradWeight);
}
#endif
|
4e0c7579604b173e3b0d71964f8ab038664026e5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
//CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Other includes
#include "Shader.h"
#include "Particle.h"
#include "Utility.h"
#define GRAVITY 2000 //some constants I need
#define DRAG 10
#define RESTITUTION_COEFFICIENT 1 // how much energy must be absorbed when bouncing off a wall
#define INITIAL_DISTANCE 0.01 // how far apart particles are from one another initially
#define MOUSE_FORCE -20000
#define CHUNK_NB 10 // separating particles in smaller chunks to avoid having HUGE arrays (1 billion particles) : else we might face stack overflow or framerate drops. To understand the code faster, you can think that CHUNK_NB = 1
#define VERTEX_CHUNK 100000 // how many particles are in each chunk
#define PARTICLE_SIZE 10000 // how many particles in total
// Function prototypes
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode);
void mouse_callback(GLFWwindow* window, double xpos, double ypos);
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods);
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Window dimensions
const GLuint WIDTH = 800, HEIGHT = 600;
GLfloat deltaTime = 0.0f;
GLfloat lastFrame = 0.0f;
GLfloat FPS = 0.0f;
const int particleRow = 100;
const int particleCol = PARTICLE_SIZE/ particleRow;
vec2 mousePos = vec2(0, 0);
bool LMB = false; // is left mouse button hit ?
float dt = 0.003;
using namespace std;
using namespace glm;
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
////////////////////////////////////////////////
//
// CUDA part
//
/////////////////////////////////////////////////
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
////////////////////////////////////////////////
//
// GL part
//
/////////////////////////////////////////////////
// Init GLFW
vector<Particle> particles; // an array storing Particle instances (that we'll move)
int particleSize = PARTICLE_SIZE; // avoid calling particles.size() inside the for loop to save some time (remember that the loop body runs once per particle every frame!)
for (int i(0); i < particleRow; i++) // storing Particle instances in the particles array
{
for (int j(0); j < particleCol; j++)
{
Particle particle; // see Particle.h and Particle.cpp
particle.setPosition(vec2(j*INITIAL_DISTANCE, i*INITIAL_DISTANCE)); // we place the particles in a square shape
particle.setMass(10);
particles.push_back(particle);
}
}
glfwInit();
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, "LearnOpenGL", nullptr, nullptr);
glfwMakeContextCurrent(window);
// Set the required callback functions
glfwSetKeyCallback(window, key_callback);
glfwSetCursorPosCallback(window, mouse_callback);
glfwSetMouseButtonCallback(window, mouse_button_callback);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
glewInit();
// Define the viewport dimensions
glViewport(0, 0, WIDTH, HEIGHT);
// Build and compile our shader program
Shader ourShader("vs.txt", "fs.txt");
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat* vertices = new GLfloat [PARTICLE_SIZE * 6];
/*GLfloat vertices[] = {
// Positions // Colors
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, // Bottom Left
0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 1.0f // Top
};
*/
GLuint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
// Bind the Vertex Array Object first, then bind and set vertex buffer(s) and attribute pointer(s).
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, PARTICLE_SIZE * 6 * sizeof(GLfloat), vertices, GL_DYNAMIC_DRAW);
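// Note: 'vertices' has not been filled yet, so this first upload only reserves GL_DYNAMIC_DRAW storage; the buffer is re-uploaded with real particle data every frame before drawing.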
//cout << "PARTICLE_SIZE * 6 * sizeof(float):" << PARTICLE_SIZE * 6 * sizeof(float) << endl;
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glBindVertexArray(0); // Unbind VAO
//glEnable(GL_POINT_SMOOTH); // allow to have rounded dots
//glEnable(GL_BLEND);
//glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glPointSize(2);
// Game loop
while (!glfwWindowShouldClose(window))
{
// Set frame time
GLfloat currentFrame = glfwGetTime();
deltaTime = currentFrame - lastFrame;
FPS = 60.0f / deltaTime;
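// Note: lastFrame is only refreshed inside the block below, so deltaTime measures the time since the last print (about one second), not since the last frame; the printed "FPS" therefore ends up near 60 regardless of the real frame rate.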
if (deltaTime >= 1.0)
{
lastFrame = currentFrame;
cout << FPS << endl;
cout << particles[0].getSpeed().x << ", " << particles[0].getSpeed().y << ":" << sqrt(pow(particles[0].getSpeed().x, 2) + pow(particles[0].getSpeed().y, 2)) << endl;
}
// Check if any events have been activated (key pressed, mouse moved etc.) and call corresponding response functions
glfwPollEvents();
for (int i(0); i < particleSize; i++) // now, each frame, we update each particle's position according to Newton's laws, its color according to its speed, and optionally make it collide with walls (this loop body runs once per particle each frame)
{
//particles[i].addForce(Vector2f(0,GRAVITY)) ; // example for adding gravity force
particles[i].addForce((vec2(mousePos - particles[i].getPosition()) * (float)(LMB * 10000 / pow(Distance(mousePos, particles[i].getPosition())+5, 2))));
// if the user clicks we add a force proportional to the inverse of the distance squared
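// i.e. F = (mousePos - p) * 10000 / (Distance(mousePos, p) + 5)^2, applied only while the left button is held (LMB is 0 otherwise, which zeroes the force); the +5 softens the singularity right under the cursor.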
particles[i].addForce(-particles[i].getSpeed()*(float)DRAG);
// we add a drag force proportional to the speed
//previousPosition = particles[i].getPosition() ; // uncomment this line if you want to perform collision detection
particles[i].updatePosition(dt); // we update the position of the particle according to the Newton's law (see Particle.h and Particle.cpp)
particles[i].clearForce(); // we don't want forces to add over time so we clear them before adding them the next frame
/*for(int j(0) ; j < wallPoints.size() ; j+=2) // uncomment these lines if you want to perform collision detection
{
if(determinant(wallPoints[j+1] - wallPoints[j], wallPoints[j+1]-particles[i].getPosition())*determinant(wallPoints[j+1] - wallPoints[j], wallPoints[j+1]-previousPosition)<0) // if we crossed a wall during this frame
{
Vector2f v = wallPoints[j+1] - wallPoints[j] ; // vector directing the wall
Vector2f n = Vector2f(-v.y,v.x) ; // vector normal to the wall
n/=Norm(n) ; // we want the normal vector to be a unit vector (length = 1)
particles[i].setPosition(previousPosition) ; // we put the particle in its previous position (in front of the wall, since it passed it)
float j = -(1+RESTITUTION_COEFFICIENT)*dotProduct(particles[i].getSpeed(), n) ; // we compute the speed after bouncing off
particles[i].setSpeed(particles[i].getSpeed() + j*n) ; // we change the speed
}
}*/
}
for (int i(0); i < particleSize; i++) // we convert the vec2 positions to OpenGL's way of storing positions: a flat array of floats
{
vertices[ i*6 ] = particles[i].getPosition().x;
vertices[i*6 + 1] = particles[i].getPosition().y;
vertices[i*6 + 2] = 0.0f;
vertices[i*6 + 3] = clamp(100 * Norm(particles[i].getSpeed()), 0, 255);
vertices[i*6 + 4] = clamp(255-100*Norm(particles[i].getSpeed()), 0, 255); // we change the particle's colors according to their speed
vertices[i*6 + 5] = 0.0f;
}
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, PARTICLE_SIZE * 6 * sizeof(GLfloat), vertices, GL_DYNAMIC_DRAW);
// Render
// Clear the colorbuffer
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw the triangle
ourShader.Use();
glDrawArrays(GL_POINTS, 0, particleSize);
//glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers(window);
}
// Properly de-allocate all resources once they've outlived their purpose
delete[] vertices;
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
// Is called whenever a key is pressed/released via GLFW
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
mousePos = vec2(2*xpos/WIDTH - 1 ,-2*ypos/HEIGHT + 1 );
}
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
if (button == GLFW_MOUSE_BUTTON_LEFT && action == GLFW_PRESS)
{
LMB = true;
cout << "mousePos:" << mousePos.x << ", " << mousePos.y << endl;
}
else
{
LMB = false;
}
} | 4e0c7579604b173e3b0d71964f8ab038664026e5.cu | #include <stdio.h>
#include <iostream>
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
//CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Other includes
#include "Shader.h"
#include "Particle.h"
#include "Utility.h"
#define GRAVITY 2000 //some constants I need
#define DRAG 10
#define RESTITUTION_COEFFICIENT 1 // how much energy must be absorbed when bouncing off a wall
#define INITIAL_DISTANCE 0.01 // how far apart particles start from one another
#define MOUSE_FORCE -20000
#define CHUNK_NB 10 // split the particles into smaller chunks to avoid having HUGE arrays (1 billion particles): otherwise we might face stack overflow or framerate drops. To understand the code faster, you can assume CHUNK_NB = 1
#define VERTEX_CHUNK 100000 // how many particles are in each chunk
#define PARTICLE_SIZE 10000 // how many particles in total
// Function prototypes
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode);
void mouse_callback(GLFWwindow* window, double xpos, double ypos);
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods);
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Window dimensions
const GLuint WIDTH = 800, HEIGHT = 600;
GLfloat deltaTime = 0.0f;
GLfloat lastFrame = 0.0f;
GLfloat FPS = 0.0f;
const int particleRow = 100;
const int particleCol = PARTICLE_SIZE/ particleRow;
vec2 mousePos = vec2(0, 0);
bool LMB = false; // is left mouse button hit ?
float dt = 0.003;
using namespace std;
using namespace glm;
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
////////////////////////////////////////////////
//
// CUDA part
//
/////////////////////////////////////////////////
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
////////////////////////////////////////////////
//
// GL part
//
/////////////////////////////////////////////////
// Init GLFW
vector<Particle> particles; // an array storing Particle instances (that we'll move)
int particleSize = PARTICLE_SIZE; // avoid calling particles.size() inside the for loop to save some time (remember that the loop body runs once per particle every frame!)
for (int i(0); i < particleRow; i++) // storing Particle instances in the particles array
{
for (int j(0); j < particleCol; j++)
{
Particle particle; // see Particle.h and Particle.cpp
particle.setPosition(vec2(j*INITIAL_DISTANCE, i*INITIAL_DISTANCE)); // we place the particles in a square shape
particle.setMass(10);
particles.push_back(particle);
}
}
glfwInit();
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, "LearnOpenGL", nullptr, nullptr);
glfwMakeContextCurrent(window);
// Set the required callback functions
glfwSetKeyCallback(window, key_callback);
glfwSetCursorPosCallback(window, mouse_callback);
glfwSetMouseButtonCallback(window, mouse_button_callback);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
glewInit();
// Define the viewport dimensions
glViewport(0, 0, WIDTH, HEIGHT);
// Build and compile our shader program
Shader ourShader("vs.txt", "fs.txt");
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat* vertices = new GLfloat [PARTICLE_SIZE * 6];
/*GLfloat vertices[] = {
// Positions // Colors
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, // Bottom Left
0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 1.0f // Top
};
*/
GLuint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
// Bind the Vertex Array Object first, then bind and set vertex buffer(s) and attribute pointer(s).
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, PARTICLE_SIZE * 6 * sizeof(GLfloat), vertices, GL_DYNAMIC_DRAW);
//cout << "PARTICLE_SIZE * 6 * sizeof(float):" << PARTICLE_SIZE * 6 * sizeof(float) << endl;
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glBindVertexArray(0); // Unbind VAO
//glEnable(GL_POINT_SMOOTH); // allow to have rounded dots
//glEnable(GL_BLEND);
//glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glPointSize(2);
// Game loop
while (!glfwWindowShouldClose(window))
{
// Set frame time
GLfloat currentFrame = glfwGetTime();
deltaTime = currentFrame - lastFrame;
FPS = 60.0f / deltaTime;
if (deltaTime >= 1.0)
{
lastFrame = currentFrame;
cout << FPS << endl;
cout << particles[0].getSpeed().x << ", " << particles[0].getSpeed().y << ":" << sqrt(pow(particles[0].getSpeed().x, 2) + pow(particles[0].getSpeed().y, 2)) << endl;
}
// Check if any events have been activated (key pressed, mouse moved etc.) and call corresponding response functions
glfwPollEvents();
for (int i(0); i < particleSize; i++) // now, each frame, we update each particle's position according to Newton's laws, its color according to its speed, and optionally make it collide with walls (this loop body runs once per particle each frame)
{
//particles[i].addForce(Vector2f(0,GRAVITY)) ; // example for adding gravity force
particles[i].addForce((vec2(mousePos - particles[i].getPosition()) * (float)(LMB * 10000 / pow(Distance(mousePos, particles[i].getPosition())+5, 2))));
// if the user clicks we add a force proportional to the inverse of the distance squared
particles[i].addForce(-particles[i].getSpeed()*(float)DRAG);
// we add a drag force proportional to the speed
//previousPosition = particles[i].getPosition() ; // uncomment this line if you want to perform collision detection
particles[i].updatePosition(dt); // we update the position of the particle according to the Newton's law (see Particle.h and Particle.cpp)
particles[i].clearForce(); // we don't want forces to add over time so we clear them before adding them the next frame
/*for(int j(0) ; j < wallPoints.size() ; j+=2) // uncomment these lines if you want to perform collision detection
{
if(determinant(wallPoints[j+1] - wallPoints[j], wallPoints[j+1]-particles[i].getPosition())*determinant(wallPoints[j+1] - wallPoints[j], wallPoints[j+1]-previousPosition)<0) // if we crossed a wall during this frame
{
Vector2f v = wallPoints[j+1] - wallPoints[j] ; // vector directing the wall
Vector2f n = Vector2f(-v.y,v.x) ; // vector normal to the wall
n/=Norm(n) ; // we want the normal vector to be a unit vector (length = 1)
particles[i].setPosition(previousPosition) ; // we put the particle in its previous position (in front of the wall, since it passed it)
float j = -(1+RESTITUTION_COEFFICIENT)*dotProduct(particles[i].getSpeed(), n) ; // we compute the speed after bouncing off
particles[i].setSpeed(particles[i].getSpeed() + j*n) ; // we change the speed
}
}*/
}
for (int i(0); i < particleSize; i++) // we convert the vec2 positions to OpenGL's way of storing positions: a flat array of floats
{
vertices[ i*6 ] = particles[i].getPosition().x;
vertices[i*6 + 1] = particles[i].getPosition().y;
vertices[i*6 + 2] = 0.0f;
vertices[i*6 + 3] = clamp(100 * Norm(particles[i].getSpeed()), 0, 255);
vertices[i*6 + 4] = clamp(255-100*Norm(particles[i].getSpeed()), 0, 255); // we change the particle's colors according to their speed
vertices[i*6 + 5] = 0.0f;
}
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, PARTICLE_SIZE * 6 * sizeof(GLfloat), vertices, GL_DYNAMIC_DRAW);
// Render
// Clear the colorbuffer
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw the triangle
ourShader.Use();
glDrawArrays(GL_POINTS, 0, particleSize);
//glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers(window);
}
// Properly de-allocate all resources once they've outlived their purpose
delete[] vertices;
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> >(dev_c, dev_a, dev_b);
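// One block of 'size' threads is plenty for this 5-element demo; larger inputs would need a multi-block launch and a bounds check inside addKernel.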
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
// Is called whenever a key is pressed/released via GLFW
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
mousePos = vec2(2*xpos/WIDTH - 1 ,-2*ypos/HEIGHT + 1 );
}
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
if (button == GLFW_MOUSE_BUTTON_LEFT && action == GLFW_PRESS)
{
LMB = true;
cout << "mousePos:" << mousePos.x << ", " << mousePos.y << endl;
}
else
{
LMB = false;
}
} |
5c22b4b313a7d00d9a87362d949be1f44c058a3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
value_type center = saturate_cast<value_type>(src(y, x));
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
int r = ksz / 2;
float r2 = (float)(r * r);
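// The weights below implement the classic bilateral product exp(-d_spatial^2 / (2*sigma_spatial^2)) * exp(-d_color^2 / (2*sigma_color^2)); the -1/2 factors are already folded into the *_inv_half constants passed in by the caller.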
int tx = x - r + ksz;
int ty = y - r + ksz;
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(src(cy, cx));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, hipStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
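// One thread per destination pixel in 32x8 blocks; the kernel uses no shared memory, which is presumably why the L1-preferring cache configuration is requested below.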
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel<T, B<T> >, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( hipGetLastError () );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, hipStream_t stream)
{
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, hipStream_t stream);
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
funcs[borderMode](src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::device::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, hipStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)
#endif /* CUDA_DISABLER */ | 5c22b4b313a7d00d9a87362d949be1f44c058a3d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
using namespace cv::gpu;
typedef unsigned char uchar;
typedef unsigned short ushort;
//////////////////////////////////////////////////////////////////////////////////
/// Bilateral filtering
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
__device__ __forceinline__ float norm_l1(const float& a) { return ::fabs(a); }
__device__ __forceinline__ float norm_l1(const float2& a) { return ::fabs(a.x) + ::fabs(a.y); }
__device__ __forceinline__ float norm_l1(const float3& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z); }
__device__ __forceinline__ float norm_l1(const float4& a) { return ::fabs(a.x) + ::fabs(a.y) + ::fabs(a.z) + ::fabs(a.w); }
__device__ __forceinline__ float sqr(const float& a) { return a * a; }
template<typename T, typename B>
__global__ void bilateral_kernel(const PtrStepSz<T> src, PtrStep<T> dst, const B b, const int ksz, const float sigma_spatial2_inv_half, const float sigma_color2_inv_half)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= src.cols || y >= src.rows)
return;
value_type center = saturate_cast<value_type>(src(y, x));
value_type sum1 = VecTraits<value_type>::all(0);
float sum2 = 0;
int r = ksz / 2;
float r2 = (float)(r * r);
int tx = x - r + ksz;
int ty = y - r + ksz;
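// Fast path: when the whole ksz x ksz window lies inside the image, the taps read src directly; otherwise the else-branch routes every tap through the border functor b.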
if (x - ksz/2 >=0 && y - ksz/2 >=0 && tx < src.cols && ty < src.rows)
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(src(cy, cx));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
else
{
for (int cy = y - r; cy < ty; ++cy)
for (int cx = x - r; cx < tx; ++cx)
{
float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy);
if (space2 > r2)
continue;
value_type value = saturate_cast<value_type>(b.at(cy, cx, src.data, src.step));
float weight = ::exp(space2 * sigma_spatial2_inv_half + sqr(norm_l1(value - center)) * sigma_color2_inv_half);
sum1 = sum1 + weight * value;
sum2 = sum2 + weight;
}
}
dst(y, x) = saturate_cast<T>(sum1 / sum2);
}
template<typename T, template <typename> class B>
void bilateral_caller(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream)
{
dim3 block (32, 8);
dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y));
B<T> b(src.rows, src.cols);
float sigma_spatial2_inv_half = -0.5f/(sigma_spatial * sigma_spatial);
float sigma_color2_inv_half = -0.5f/(sigma_color * sigma_color);
cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel<T, B<T> >, cudaFuncCachePreferL1) );
bilateral_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, kernel_size, sigma_spatial2_inv_half, sigma_color2_inv_half);
cudaSafeCall ( cudaGetLastError () );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template<typename T>
void bilateral_filter_gpu(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float gauss_spatial_coeff, float gauss_color_coeff, int borderMode, cudaStream_t stream)
{
typedef void (*caller_t)(const PtrStepSzb& src, PtrStepSzb dst, int kernel_size, float sigma_spatial, float sigma_color, cudaStream_t stream);
static caller_t funcs[] =
{
bilateral_caller<T, BrdReflect101>,
bilateral_caller<T, BrdReplicate>,
bilateral_caller<T, BrdConstant>,
bilateral_caller<T, BrdReflect>,
bilateral_caller<T, BrdWrap>,
};
funcs[borderMode](src, dst, kernel_size, gauss_spatial_coeff, gauss_color_coeff, stream);
}
}
}}}
#define OCV_INSTANTIATE_BILATERAL_FILTER(T) \
template void cv::gpu::device::imgproc::bilateral_filter_gpu<T>(const PtrStepSzb&, PtrStepSzb, int, float, float, int, cudaStream_t);
OCV_INSTANTIATE_BILATERAL_FILTER(uchar)
//OCV_INSTANTIATE_BILATERAL_FILTER(uchar2)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar3)
OCV_INSTANTIATE_BILATERAL_FILTER(uchar4)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar2)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar3)
//OCV_INSTANTIATE_BILATERAL_FILTER(schar4)
OCV_INSTANTIATE_BILATERAL_FILTER(short)
//OCV_INSTANTIATE_BILATERAL_FILTER(short2)
OCV_INSTANTIATE_BILATERAL_FILTER(short3)
OCV_INSTANTIATE_BILATERAL_FILTER(short4)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort)
//OCV_INSTANTIATE_BILATERAL_FILTER(ushort2)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort3)
OCV_INSTANTIATE_BILATERAL_FILTER(ushort4)
//OCV_INSTANTIATE_BILATERAL_FILTER(int)
//OCV_INSTANTIATE_BILATERAL_FILTER(int2)
//OCV_INSTANTIATE_BILATERAL_FILTER(int3)
//OCV_INSTANTIATE_BILATERAL_FILTER(int4)
OCV_INSTANTIATE_BILATERAL_FILTER(float)
//OCV_INSTANTIATE_BILATERAL_FILTER(float2)
OCV_INSTANTIATE_BILATERAL_FILTER(float3)
OCV_INSTANTIATE_BILATERAL_FILTER(float4)
#endif /* CUDA_DISABLER */ |
2d2ee5525da41722ed2967dc24804e368d85ec3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Cource - " CUDA "
Task 3:
,
GPU.
GPU .
Written by Pavel Santaev
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <png.h>
#include <multithreading.cpp>
#include <libpng.h>
void abort(const char * s, ...);
__device__ inline png_byte * getPixel(png_byte * img, int w, int h,
int x, int y, size_t pixelSize){
int idx = y * w + x;
return &(img[idx * pixelSize]);
}
__device__ inline void setPixel(png_byte * pxIn, png_byte * pxOut,
size_t pixelSize){
for (int i = 0; i < pixelSize; i++){
pxOut[i] = pxIn[i];
}
}
__device__ inline void addPixel(png_byte * pxIn, png_byte * pxOut,
float alpha, float betta, size_t pixelSize){
for (int i = 0; i < pixelSize; i++){
pxOut[i] = (png_byte)(((double)pxOut[i]) + (((double)pxIn[i]) * alpha));
}
}
__device__ inline void setPixelForRobertFilter(
png_byte * img, png_byte * imgOut,
int width, int height,
int x, int y, size_t pixelSize){
int idx[][2] =
{
{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {0, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}
};
int sobel[] =
{
0, 0, 0,
0, 1, 0,
0, 0, 0
};
png_byte * pxOut =
getPixel(imgOut, width, height, x, y, pixelSize);
png_byte pxOutLoc[4] = {0};
for (int i = 0; i < 9; i++){
png_byte * pxIn =
getPixel(img, width, height,
x + idx[i][0], y + idx[i][1], pixelSize);
addPixel(pxIn, pxOutLoc, ((double)sobel[i]) / 2, 1, pixelSize);
}
addPixel(pxOutLoc, pxOutLoc, 0, 2, pixelSize);
setPixel(pxOutLoc, pxOut, pixelSize);
}
__global__ void filter(png_byte * img, png_byte * imgOut,
int width, int height, size_t pixelSize){
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (!(x + 1 < width && y + 1 < height && x > 0 && y > 0)){
return;
}
//for (int k = 0; k < 100000; k++){
const int idx[][2] =
{
{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {0, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}
};
const float sobel[] =
{
0.025, 0.1, 0.025,
0.1, 0.5, 0.1,
0.025, 0.1, 0.025
};
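// Despite the name, the 'sobel' array above is a normalised 3x3 blur kernel (its weights sum to 1.0), not a Sobel edge operator.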
png_byte * pxOut = &(imgOut[(y * width + x) * pixelSize]);
uint3 px = make_uint3(0, 0, 0); // zero-initialise the accumulator (it was previously read before being written)
for (int i = 0; i < 9; i++){
png_byte * pxIn = &(img[((y + idx[i][1]) * width + (x + idx[i][0])) * pixelSize]);
px.x += (float)sobel[i] * pxIn[0];
px.y += (float)sobel[i] * pxIn[1];
px.z += (float)sobel[i] * pxIn[2];
}
pxOut[0] = px.x;
pxOut[1] = px.y;
pxOut[2] = px.z;
pxOut[3] = 255;
/*png_byte * pxOut = &(imgOut[(y * width + x) * pixelSize]);
png_byte * pxIn = &(img[(y * width + x) * pixelSize]);
pxOut[0] = pxIn[0];
pxOut[1] = pxIn[1];
pxOut[2] = pxIn[2];
pxOut[3] = 255;*/
//}
}
bool initCuda(){
int deviceCount = 0;
hipError_t error;
error = hipGetDeviceCount(&deviceCount);
if (hipSuccess != error){
printf("Error in hipGetDeviceCount: %s\n", hipGetErrorString(error));
return false;
}
printf("hipGetDeviceCount = %x\n", deviceCount);
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n",
device, deviceProp.major, deviceProp.minor);
}
int deviceID = 0;
hipDeviceProp_t devProp;
error = hipGetDeviceProperties(&devProp, deviceID);
if (hipSuccess != error){
printf("Error in hipGetDeviceProperties: %d\n", hipGetErrorString(error));
return false;
}
return true;
}
struct ThreadArg{
size_t device;
size_t count;
png_bytep * row_pointers;
png_bytep * row_pointers_res;
size_t rowSize;
size_t height;
size_t width;
int * i;
};
void simpleInc(int * j){
for (int i = 0; i < 1000 * 1000 * 100; i++){
(*j)++;
}
}
static CUT_THREADPROC solverThread(ThreadArg *plan){
printf("Started thread %d\n", plan->device);
hipError_t error;
// Init GPU
error = hipSetDevice(plan->device);
if (hipSuccess != error){
printf("Error in hipSetDevice: %s\n", hipGetErrorString(error));
return;
}
size_t height = plan->height;
size_t width = plan->width;
// start kernel
dim3 threads = dim3(32, 32);
dim3 blocks = dim3(ceil(width/(float)threads.x), ceil(height/(float)threads.y / (plan->count)));
size_t heightForDevice = blocks.y * threads.y + 2; // for edges
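// Each GPU gets a horizontal band of rows plus a one-row halo above and below so the 3x3 filter has valid neighbours; only the interior rows (i = 1 .. heightForDevice-2) are copied back to the host later.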
int yOffet = blocks.y * threads.y * (plan->device) - 1;
//if (plan->device == 0){
if (yOffet < 0){
yOffet = 0;
heightForDevice -= 1;
}
png_byte * row_pointers_device;
png_byte * row_pointers_device_out;
printf("Thread start copy %d\n", plan->device);
hipMalloc(&row_pointers_device, heightForDevice * plan->rowSize);
hipMalloc(&row_pointers_device_out, heightForDevice * plan->rowSize);
for (int i = 0; i < heightForDevice && i < height; i++){
hipMemcpy(&(row_pointers_device[i * plan->rowSize]), (plan->row_pointers)[i + yOffet],
plan->rowSize, hipMemcpyHostToDevice);
}
printf("Thread end copy %d\n", plan->device);
//hipDeviceSynchronize();
printf("Thread %d: %d\n", plan->device, yOffet);
hipLaunchKernelGGL(( filter), dim3(blocks), dim3(threads), 0, 0, row_pointers_device,
row_pointers_device_out, width, heightForDevice, plan->rowSize / width);
printf("Thread %d: filter end\n", plan->device);
// copy res png to host
//if (plan->device == 0){
for (int i = 1 ; (i < heightForDevice - 1) && (yOffet + i < height); i++){
hipMemcpy((plan->row_pointers_res)[yOffet + i], &(row_pointers_device_out[i * plan->rowSize]),
plan->rowSize, hipMemcpyDeviceToHost);
}
printf("Thread %d: copy end\n", plan->device);
//hipDeviceSynchronize();
//hipDeviceReset();
CUT_THREADEND;
}
int main(int argc, char ** args){
//hipError_t error;
png_structp png_ptr;
png_infop info_ptr;
png_bytep * row_pointers;
png_bytep * row_pointers_res;
// args
char * file_name;
if (argc > 1){
file_name = args[1];
} else {
abort("You should to add fileName to args.\n ./out [fileName]");
}
/*if (!initCuda()){
return 0;
}
*/
openPng(file_name, &png_ptr, &info_ptr, &row_pointers);
int width = png_get_image_width(png_ptr, info_ptr);
int height = png_get_image_height(png_ptr, info_ptr);
png_byte color_type = png_get_color_type(png_ptr, info_ptr);
png_byte bit_depth = png_get_bit_depth(png_ptr, info_ptr);
size_t rowSize = png_get_rowbytes(png_ptr,info_ptr);
row_pointers_res = (png_bytep*) malloc(sizeof(png_bytep) * height);
for (int y = 0; y < height; y++){
row_pointers_res[y] =
(png_byte*) malloc(rowSize);
}
int GPU_N = 0;
hipGetDeviceCount(&GPU_N);
ThreadArg solverOpt[GPU_N];
CUTThread threadID[GPU_N];
for(int i = 0; i < GPU_N; i++){
solverOpt[i].device = i;
solverOpt[i].count = GPU_N;
solverOpt[i].row_pointers = row_pointers;
solverOpt[i].row_pointers_res = row_pointers_res;
solverOpt[i].rowSize = rowSize;
solverOpt[i].width = width;
solverOpt[i].height = height;
}
//Start CPU thread for each GPU
for(int gpuIndex = 0; gpuIndex < GPU_N; gpuIndex++){
printf("Starting thread %d\n", gpuIndex);
//usleep(1000);
threadID[gpuIndex] = cutStartThread((CUT_THREADROUTINE)solverThread,
&solverOpt[gpuIndex]);
}
//sleep(2);
printf("Wait threads\n");
//waiting for GPU results
cutWaitForThreads(threadID, GPU_N);
printf("Threads finished\n");
// save png
savePng("outImg.png", png_ptr, info_ptr, row_pointers_res);
for (int y=0; y<height; y++){
free(row_pointers[y]);
}
free(row_pointers);
printf("\nFinished\n");
}
| 2d2ee5525da41722ed2967dc24804e368d85ec3d.cu | /*
Cource - "Разработка приложений на CUDA "
Task 3:
Модифицировать предыдущую программу так, чтобы использовались
все имеющиеся в распоряжение программы GPU. Программа должна
определять количество доступных GPU и распределять работу по ним.
Written by Pavel Santaev
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <png.h>
#include <multithreading.cpp>
#include <libpng.h>
void abort(const char * s, ...);
__device__ inline png_byte * getPixel(png_byte * img, int w, int h,
int x, int y, size_t pixelSize){
int idx = y * w + x;
return &(img[idx * pixelSize]);
}
__device__ inline void setPixel(png_byte * pxIn, png_byte * pxOut,
size_t pixelSize){
for (int i = 0; i < pixelSize; i++){
pxOut[i] = pxIn[i];
}
}
__device__ inline void addPixel(png_byte * pxIn, png_byte * pxOut,
float alpha, float betta, size_t pixelSize){
for (int i = 0; i < pixelSize; i++){
pxOut[i] = (png_byte)(((double)pxOut[i]) + (((double)pxIn[i]) * alpha));
}
}
__device__ inline void setPixelForRobertFilter(
png_byte * img, png_byte * imgOut,
int width, int height,
int x, int y, size_t pixelSize){
int idx[][2] =
{
{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {0, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}
};
int sobel[] =
{
0, 0, 0,
0, 1, 0,
0, 0, 0
};
png_byte * pxOut =
getPixel(imgOut, width, height, x, y, pixelSize);
png_byte pxOutLoc[4] = {0};
for (int i = 0; i < 9; i++){
png_byte * pxIn =
getPixel(img, width, height,
x + idx[i][0], y + idx[i][1], pixelSize);
addPixel(pxIn, pxOutLoc, ((double)sobel[i]) / 2, 1, pixelSize);
}
addPixel(pxOutLoc, pxOutLoc, 0, 2, pixelSize);
setPixel(pxOutLoc, pxOut, pixelSize);
}
__global__ void filter(png_byte * img, png_byte * imgOut,
int width, int height, size_t pixelSize){
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (!(x + 1 < width && y + 1 < height && x > 0 && y > 0)){
return;
}
//for (int k = 0; k < 100000; k++){
const int idx[][2] =
{
{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {0, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}
};
const float sobel[] =
{
0.025, 0.1, 0.025,
0.1, 0.5, 0.1,
0.025, 0.1, 0.025
};
png_byte * pxOut = &(imgOut[(y * width + x) * pixelSize]);
uint3 px = make_uint3(0, 0, 0); // zero-initialise the accumulator (it was previously read before being written)
for (int i = 0; i < 9; i++){
png_byte * pxIn = &(img[((y + idx[i][1]) * width + (x + idx[i][0])) * pixelSize]);
px.x += (float)sobel[i] * pxIn[0];
px.y += (float)sobel[i] * pxIn[1];
px.z += (float)sobel[i] * pxIn[2];
}
pxOut[0] = px.x;
pxOut[1] = px.y;
pxOut[2] = px.z;
pxOut[3] = 255;
/*png_byte * pxOut = &(imgOut[(y * width + x) * pixelSize]);
png_byte * pxIn = &(img[(y * width + x) * pixelSize]);
pxOut[0] = pxIn[0];
pxOut[1] = pxIn[1];
pxOut[2] = pxIn[2];
pxOut[3] = 255;*/
//}
}
bool initCuda(){
int deviceCount = 0;
cudaError_t error;
error = cudaGetDeviceCount(&deviceCount);
if (cudaSuccess != error){
printf("Error in cudaGetDeviceCount: %s\n", cudaGetErrorString(error));
return false;
}
printf("cudaGetDeviceCount = %x\n", deviceCount);
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n",
device, deviceProp.major, deviceProp.minor);
}
int deviceID = 0;
cudaDeviceProp devProp;
error = cudaGetDeviceProperties(&devProp, deviceID);
if (cudaSuccess != error){
printf("Error in cudaGetDeviceProperties: %d\n", cudaGetErrorString(error));
return false;
}
return true;
}
struct ThreadArg{
size_t device;
size_t count;
png_bytep * row_pointers;
png_bytep * row_pointers_res;
size_t rowSize;
size_t height;
size_t width;
int * i;
};
void simpleInc(int * j){
for (int i = 0; i < 1000 * 1000 * 100; i++){
(*j)++;
}
}
static CUT_THREADPROC solverThread(ThreadArg *plan){
printf("Started thread %d\n", plan->device);
cudaError_t error;
// Init GPU
error = cudaSetDevice(plan->device);
if (cudaSuccess != error){
printf("Error in cudaSetDevice: %s\n", cudaGetErrorString(error));
return;
}
size_t height = plan->height;
size_t width = plan->width;
// start kernel
dim3 threads = dim3(32, 32);
dim3 blocks = dim3(ceil(width/(float)threads.x), ceil(height/(float)threads.y / (plan->count)));
size_t heightForDevice = blocks.y * threads.y + 2; // for edges
int yOffet = blocks.y * threads.y * (plan->device) - 1;
//if (plan->device == 0){
if (yOffet < 0){
yOffet = 0;
heightForDevice -= 1;
}
png_byte * row_pointers_device;
png_byte * row_pointers_device_out;
printf("Thread start copy %d\n", plan->device);
cudaMalloc(&row_pointers_device, heightForDevice * plan->rowSize);
cudaMalloc(&row_pointers_device_out, heightForDevice * plan->rowSize);
for (int i = 0; i < heightForDevice && i < height; i++){
cudaMemcpy(&(row_pointers_device[i * plan->rowSize]), (plan->row_pointers)[i + yOffet],
plan->rowSize, cudaMemcpyHostToDevice);
}
printf("Thread end copy %d\n", plan->device);
//cudaThreadSynchronize();
printf("Thread %d: %d\n", plan->device, yOffet);
filter<<<blocks, threads>>>(row_pointers_device,
row_pointers_device_out, width, heightForDevice, plan->rowSize / width);
printf("Thread %d: filter end\n", plan->device);
// copy res png to host
//if (plan->device == 0){
for (int i = 1 ; (i < heightForDevice - 1) && (yOffet + i < height); i++){
cudaMemcpy((plan->row_pointers_res)[yOffet + i], &(row_pointers_device_out[i * plan->rowSize]),
plan->rowSize, cudaMemcpyDeviceToHost);
}
printf("Thread %d: copy end\n", plan->device);
//cudaThreadSynchronize();
//cudaThreadExit();
CUT_THREADEND;
}
int main(int argc, char ** args){
//cudaError_t error;
png_structp png_ptr;
png_infop info_ptr;
png_bytep * row_pointers;
png_bytep * row_pointers_res;
// args
char * file_name;
if (argc > 1){
file_name = args[1];
} else {
abort("You should to add fileName to args.\n ./out [fileName]");
}
/*if (!initCuda()){
return 0;
}
*/
openPng(file_name, &png_ptr, &info_ptr, &row_pointers);
int width = png_get_image_width(png_ptr, info_ptr);
int height = png_get_image_height(png_ptr, info_ptr);
png_byte color_type = png_get_color_type(png_ptr, info_ptr);
png_byte bit_depth = png_get_bit_depth(png_ptr, info_ptr);
size_t rowSize = png_get_rowbytes(png_ptr,info_ptr);
row_pointers_res = (png_bytep*) malloc(sizeof(png_bytep) * height);
for (int y = 0; y < height; y++){
row_pointers_res[y] =
(png_byte*) malloc(rowSize);
}
int GPU_N = 0;
cudaGetDeviceCount(&GPU_N);
ThreadArg solverOpt[GPU_N];
CUTThread threadID[GPU_N];
for(int i = 0; i < GPU_N; i++){
solverOpt[i].device = i;
solverOpt[i].count = GPU_N;
solverOpt[i].row_pointers = row_pointers;
solverOpt[i].row_pointers_res = row_pointers_res;
solverOpt[i].rowSize = rowSize;
solverOpt[i].width = width;
solverOpt[i].height = height;
}
//Start CPU thread for each GPU
for(int gpuIndex = 0; gpuIndex < GPU_N; gpuIndex++){
printf("Starting thread %d\n", gpuIndex);
//usleep(1000);
threadID[gpuIndex] = cutStartThread((CUT_THREADROUTINE)solverThread,
&solverOpt[gpuIndex]);
}
//sleep(2);
printf("Wait threads\n");
//waiting for GPU results
cutWaitForThreads(threadID, GPU_N);
printf("Threads finished\n");
// save png
savePng("outImg.png", png_ptr, info_ptr, row_pointers_res);
for (int y=0; y<height; y++){
free(row_pointers[y]);
}
free(row_pointers);
printf("\nFinished\n");
}
|
7e795e5e3e3bbacd8a722fd355430f8dafb0bbad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/vec_distance.hpp"
#include "opencv2/core/cuda/datamov_utils.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace bf_match
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<float>());
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, smem_tuple(s_trainIdx, s_imgIdx), thrust::tie(bestTrainIdx, bestImgIdx), threadIdx.x, less<float>());
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
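// loadQueryToSmem above caches a block's BLOCK_SIZE query descriptors in shared memory once; the threads of each row load their descriptor's columns in strides of BLOCK_SIZE.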
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query,volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
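// loopUnrolledCached above walks the train matrix in BLOCK_SIZE-row tiles: each iteration stages a transposed BLOCK_SIZE x BLOCK_SIZE tile of train in shared memory, accumulates the distance against the cached query row, and keeps the smallest distance (and its train index) seen so far.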
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
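// In the kernel above, each block serves BLOCK_SIZE queries (one per threadIdx.y); the BLOCK_SIZE threads along x cooperate on a single query and their per-thread candidates are reduced in shared memory by findBestMatch, with lane x == 0 writing the winner.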
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
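// Shared memory is reused in two phases: the query cache (BLOCK_SIZE * MAX_DESC_LEN) plus
// one train tile (BLOCK_SIZE * BLOCK_SIZE) while matching, then two BLOCK_SIZE * BLOCK_SIZE
// buffers for the distance/index reduction; allocate whichever phase needs more.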
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
Mask m = mask;
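// Scan every train image, advancing the per-image mask, and keep the best
// (distance, trainIdx, imgIdx) triple seen so far.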
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
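// The reduction phase needs three BLOCK_SIZE * BLOCK_SIZE buffers here (distance,
// trainIdx, imgIdx), hence the comparison against 2 * BLOCK_SIZE.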
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
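// Same tiling as the cached variant, but the query tile is re-read from global memory
// for every train tile instead of being cached once per block.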
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
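// Generic fallback: same scheme as loopUnrolled, but the column loop bound is computed
// at run time, so it works for any descriptor length.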
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match dispatcher
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
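// Pick a kernel specialized for the common descriptor widths (query.cols <= 64 or <= 128);
// longer descriptors fall back to the generic match kernel.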
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, train, mask, trainIdx, distance, stream);
}
}
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (mask.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (mask.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (mask.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (masks.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (masks.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& maskCollection, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
hipStream_t stream)
{
if (masks.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, hipStream_t stream);
} // namespace bf_match
}}} // namespace cv { namespace gpu { namespace cudev {
#endif /* CUDA_DISABLER */
| 7e795e5e3e3bbacd8a722fd355430f8dafb0bbad.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/vec_distance.hpp"
#include "opencv2/core/cuda/datamov_utils.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace bf_match
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<float>());
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
reduceKeyVal<BLOCK_SIZE>(s_distance, bestDistance, smem_tuple(s_trainIdx, s_imgIdx), thrust::tie(bestTrainIdx, bestImgIdx), threadIdx.x, less<float>());
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
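// One block row (threadIdx.y) per query descriptor; the 1-D grid covers query.rows in steps of BLOCK_SIZE.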
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const PtrStepSz<T>& query, volatile int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && distVal < bestDistance && mask(queryIdx, trainIdx))
{
bestImgIdx = imgIdx;
bestDistance = distVal;
bestTrainIdx = trainIdx;
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask,
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
float myBestDistance = numeric_limits<float>::max();
int myBestTrainIdx = -1;
int myBestImgIdx = -1;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance, myBestTrainIdx, myBestImgIdx);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdxIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance, myBestTrainIdx, myBestImgIdx, s_distance, s_trainIdx, s_imgIdxIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = myBestTrainIdx;
bestImgIdx[queryIdx] = myBestImgIdx;
bestDistance[queryIdx] = myBestDistance;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match dispatcher
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, trainIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, train, mask, trainIdx, distance, stream);
}
}
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Match caller
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
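// Dispatch on the mask: a user-supplied mask restricts which (query, train) pairs may
// match; otherwise WithOutMask() accepts every pair.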
if (mask.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, const PtrStepSzb& mask,
const PtrStepSzi& trainIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (mask.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), SingleMask(mask),
trainIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), WithOutMask(),
trainIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, const PtrStepSzb& mask, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchL1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& maskCollection, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance,
cudaStream_t stream)
{
if (masks.data)
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),
trainIdx, imgIdx, distance,
stream);
}
else
{
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(),
trainIdx, imgIdx, distance,
stream);
}
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzi& trainIdx, const PtrStepSzi& imgIdx, const PtrStepSzf& distance, cudaStream_t stream);
} // namespace bf_match
}}} // namespace cv { namespace gpu { namespace cudev {
#endif /* CUDA_DISABLER */
|
594eb6e2277ffc8bee7edf879f18a4a3f04463ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include <random>
#include <iostream>
#include "hiprand/hiprand_kernel.h"
#include "egblas/logistic_noise.hpp"
#include "egblas/cuda_check.hpp"
// Kernel to setup the random states
__global__ void ln_setup_kernel(hiprandState_t* states, size_t seed) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(seed, id, 0, &states[id]);
}
// Kernels for logistic noise
template<typename T>
__device__ T logistic_sigmoid(T x){
return T(1) / (T(1) + exp(-x));
}
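// Each logistic-noise kernel below computes
//   y[i*incy] = alpha * (x[i*incx] + n_i * logistic_sigmoid(x[i*incx]))
// with n_i drawn from a standard normal via hiprand_normal, using a grid-stride loop.
// The "*_alpha1" variants drop the alpha multiply (alpha == 1), the "*1" variants assume
// unit strides, and logistic_noise_kernel0 simply zeroes y when alpha == 0.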
template <typename T>
__global__ void logistic_noise_kernel(hiprandState_t* states, size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[incy * index] = alpha * (x[incx * index] + hiprand_normal(&local_state) * logistic_sigmoid(x[incx * index]));
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void logistic_noise_kernel1(hiprandState_t* states, size_t n, T alpha, const T* x, T* y) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[index] = alpha * (x[index] + hiprand_normal(&local_state) * logistic_sigmoid(x[index]));
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void logistic_noise_kernel_alpha1(hiprandState_t* states, size_t n, const T* x, size_t incx, T* y, size_t incy) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[incy * index] = x[incx * index] + hiprand_normal(&local_state) * logistic_sigmoid(x[incx * index]);
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void logistic_noise_kernel1_alpha1(hiprandState_t* states, size_t n, const T* x, T* y) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[index] = x[index] + hiprand_normal(&local_state) * T(logistic_sigmoid(x[index]));
}
// Copy state back to global memory
states[base_index] = local_state;
}
// Kernel for reset (when alpha = 0)
template <typename T>
__global__ void logistic_noise_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
y[incy * index] = T(0);
}
}
template <typename T>
void logistic_noise_kernel0_run(size_t n, T* y, size_t incy) {
static int blockSize;
static int minGridSize;
if (!blockSize) {
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, logistic_noise_kernel0<T>, 0, 0);
}
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( logistic_noise_kernel0<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
hipDeviceSynchronize();
#endif
}
// Preparation
void* egblas_logistic_noise_prepare(){
std::random_device rd;
return egblas_logistic_noise_prepare_seed(rd());
}
void* egblas_logistic_noise_prepare_seed(size_t seed){
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, 64 * 64 * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( ln_setup_kernel), dim3(64), dim3(64), 0, 0, states, seed);
return states;
}
void egblas_logistic_noise_release(void* states){
// Free the states
cuda_check(hipFree(states));
}
// Regular logistic_noise
void egblas_slogistic_noise_seed(size_t n, float alpha, const float* x, size_t incx, float * y, size_t incy, size_t seed) {
if (alpha == 0.0f) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
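    // The launch size is capped as n grows so that only gridSize * blockSize hiprand states
    // need to be allocated and seeded below; larger inputs are handled by the grid-stride
    // loops inside the kernels rather than by creating more states.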
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, gridSize * blockSize * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( ln_setup_kernel), dim3(gridSize), dim3(blockSize), 0, 0, states, seed);
// Compute the logistic_noise
if (incx == 1 && incy == 1) {
if (alpha == 1.0) {
hipLaunchKernelGGL(( logistic_noise_kernel1_alpha1<float>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, x, y);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel1<float>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, alpha, x, y);
}
} else {
if (alpha == 1.0) {
hipLaunchKernelGGL(( logistic_noise_kernel_alpha1<float>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, x, incx, y, incy);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel<float>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, alpha, x, incx, y, incy);
}
}
// Free the states
cuda_check(hipFree(states));
}
void egblas_slogistic_noise(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
std::random_device rd;
egblas_slogistic_noise_seed(n, alpha, x, incx, y, incy, rd());
}
void egblas_dlogistic_noise_seed(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy, size_t seed) {
if (alpha == 0.0) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
hiprandState_t* states;
cuda_check(hipMalloc((void**)&states, gridSize * blockSize * sizeof(hiprandState_t)));
// Initialize the seeds
hipLaunchKernelGGL(( ln_setup_kernel), dim3(gridSize), dim3(blockSize), 0, 0, states, seed);
// Compute the logistic_noise
if (incx == 1 && incy == 1) {
if (alpha == 1.0) {
hipLaunchKernelGGL(( logistic_noise_kernel1_alpha1<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, x, y);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel1<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, alpha, x, y);
}
} else {
if (alpha == 1.0) {
hipLaunchKernelGGL(( logistic_noise_kernel_alpha1<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, x, incx, y, incy);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel<double>), dim3(gridSize), dim3(blockSize), 0, 0, states, n, alpha, x, incx, y, incy);
}
}
// Free the states
cuda_check(hipFree(states));
}
void egblas_dlogistic_noise(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
std::random_device rd;
egblas_dlogistic_noise_seed(n, alpha, x, incx, y, incy, rd());
}
// Functions with pre-allocated states
void egblas_slogistic_noise_states(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy, void* states) {
if (alpha == 0.0f) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the logistic_noise
hiprandState_t* cstates = reinterpret_cast<hiprandState_t*>(states);
if (incx == 1 && incy == 1) {
if (alpha == 1.0f) {
hipLaunchKernelGGL(( logistic_noise_kernel1_alpha1<float>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, x, y);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel1<float>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, alpha, x, y);
}
} else {
if (alpha == 1.0f) {
hipLaunchKernelGGL(( logistic_noise_kernel_alpha1<float>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, x, incx, y, incy);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel<float>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, alpha, x, incx, y, incy);
}
}
}
void egblas_dlogistic_noise_states(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy, void* states) {
if (alpha == 0.0) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the logistic_noise
hiprandState_t* cstates = reinterpret_cast<hiprandState_t*>(states);
if (incx == 1 && incy == 1) {
if (alpha == 1.0) {
hipLaunchKernelGGL(( logistic_noise_kernel1_alpha1<double>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, x, y);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel1<double>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, alpha, x, y);
}
} else {
if (alpha == 1.0) {
hipLaunchKernelGGL(( logistic_noise_kernel_alpha1<double>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, x, incx, y, incy);
} else {
hipLaunchKernelGGL(( logistic_noise_kernel<double>), dim3(gridSize), dim3(blockSize), 0, 0, cstates, n, alpha, x, incx, y, incy);
}
}
}
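// Typical call sequence for the state-based API above (illustrative sketch; d_x and d_y are
// assumed to be caller-owned device buffers of n floats):
//
//     void* states = egblas_logistic_noise_prepare();                 // allocate and seed the states
//     egblas_slogistic_noise_states(n, 1.0f, d_x, 1, d_y, 1, states); // y = x + N(0,1) * sigmoid(x)
//     egblas_logistic_noise_release(states);                          // free the state pool
//
// The one-shot variants (egblas_slogistic_noise / egblas_slogistic_noise_seed) allocate and
// free their own states on every call.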
| 594eb6e2277ffc8bee7edf879f18a4a3f04463ef.cu | //=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
// http://opensource.org/licenses/MIT)
//=======================================================================
#include <random>
#include <iostream>
#include "curand_kernel.h"
#include "egblas/logistic_noise.hpp"
#include "egblas/cuda_check.hpp"
// Kernel to setup the random states
__global__ void ln_setup_kernel(curandState* states, size_t seed) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, id, 0, &states[id]);
}
// Kernels for logistic noise
template<typename T>
__device__ T logistic_sigmoid(T x){
return T(1) / (T(1) + exp(-x));
}
template <typename T>
__global__ void logistic_noise_kernel(curandState* states, size_t n, T alpha, const T* x, size_t incx, T* y, size_t incy) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[incy * index] = alpha * (x[incx * index] + curand_normal(&local_state) * logistic_sigmoid(x[incx * index]));
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void logistic_noise_kernel1(curandState* states, size_t n, T alpha, const T* x, T* y) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[index] = alpha * (x[index] + curand_normal(&local_state) * logistic_sigmoid(x[index]));
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void logistic_noise_kernel_alpha1(curandState* states, size_t n, const T* x, size_t incx, T* y, size_t incy) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[incy * index] = x[incx * index] + curand_normal(&local_state) * logistic_sigmoid(x[incx * index]);
}
// Copy state back to global memory
states[base_index] = local_state;
}
template <typename T>
__global__ void logistic_noise_kernel1_alpha1(curandState* states, size_t n, const T* x, T* y) {
const auto base_index = threadIdx.x + blockIdx.x * blockDim.x;
const auto stride = blockDim.x * gridDim.x;
// Copy state to local memory for efficiency
auto local_state = states[base_index];
for (auto index = base_index; index < n; index += stride) {
y[index] = x[index] + curand_normal(&local_state) * T(logistic_sigmoid(x[index]));
}
// Copy state back to global memory
states[base_index] = local_state;
}
// Kernel for reset (when alpha = 0)
template <typename T>
__global__ void logistic_noise_kernel0(size_t n, T* y, size_t incy) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
y[incy * index] = T(0);
}
}
template <typename T>
void logistic_noise_kernel0_run(size_t n, T* y, size_t incy) {
static int blockSize;
static int minGridSize;
if (!blockSize) {
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, logistic_noise_kernel0<T>, 0, 0);
}
int gridSize = ((n / incy) + blockSize - 1) / blockSize;
logistic_noise_kernel0<T><<<gridSize, blockSize>>>(n, y, incy);
#ifdef EGBLAS_SYNCHRONIZE
cudaDeviceSynchronize();
#endif
}
// Preparation
void* egblas_logistic_noise_prepare(){
std::random_device rd;
return egblas_logistic_noise_prepare_seed(rd());
}
void* egblas_logistic_noise_prepare_seed(size_t seed){
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, 64 * 64 * sizeof(curandState)));
// Initialize the seeds
ln_setup_kernel<<<64, 64>>>(states, seed);
return states;
}
void egblas_logistic_noise_release(void* states){
// Free the states
cuda_check(cudaFree(states));
}
// Regular logistic_noise
void egblas_slogistic_noise_seed(size_t n, float alpha, const float* x, size_t incx, float * y, size_t incy, size_t seed) {
if (alpha == 0.0f) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, gridSize * blockSize * sizeof(curandState)));
// Initialize the seeds
ln_setup_kernel<<<gridSize, blockSize>>>(states, seed);
// Compute the logistic_noise
if (incx == 1 && incy == 1) {
if (alpha == 1.0) {
logistic_noise_kernel1_alpha1<float><<<gridSize, blockSize>>>(states, n, x, y);
} else {
logistic_noise_kernel1<float><<<gridSize, blockSize>>>(states, n, alpha, x, y);
}
} else {
if (alpha == 1.0) {
logistic_noise_kernel_alpha1<float><<<gridSize, blockSize>>>(states, n, x, incx, y, incy);
} else {
logistic_noise_kernel<float><<<gridSize, blockSize>>>(states, n, alpha, x, incx, y, incy);
}
}
// Free the states
cuda_check(cudaFree(states));
}
void egblas_slogistic_noise(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
std::random_device rd;
egblas_slogistic_noise_seed(n, alpha, x, incx, y, incy, rd());
}
void egblas_dlogistic_noise_seed(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy, size_t seed) {
if (alpha == 0.0) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
if (n <= 100) {
gridSize = 1;
blockSize = 64;
} else if(n <= 1000){
gridSize = 8;
blockSize = 64;
} else if(n <= 10000){
gridSize = 16;
blockSize = 64;
} else if(n <= 100000){
gridSize = 32;
blockSize = 64;
}
// Allocate room for the states
curandState* states;
cuda_check(cudaMalloc((void**)&states, gridSize * blockSize * sizeof(curandState)));
// Initialize the seeds
ln_setup_kernel<<<gridSize, blockSize>>>(states, seed);
// Compute the logistic_noise
if (incx == 1 && incy == 1) {
if (alpha == 1.0) {
logistic_noise_kernel1_alpha1<double><<<gridSize, blockSize>>>(states, n, x, y);
} else {
logistic_noise_kernel1<double><<<gridSize, blockSize>>>(states, n, alpha, x, y);
}
} else {
if (alpha == 1.0) {
logistic_noise_kernel_alpha1<double><<<gridSize, blockSize>>>(states, n, x, incx, y, incy);
} else {
logistic_noise_kernel<double><<<gridSize, blockSize>>>(states, n, alpha, x, incx, y, incy);
}
}
// Free the states
cuda_check(cudaFree(states));
}
void egblas_dlogistic_noise(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
std::random_device rd;
egblas_dlogistic_noise_seed(n, alpha, x, incx, y, incy, rd());
}
// Functions with pre-allocated states
void egblas_slogistic_noise_states(size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy, void* states) {
if (alpha == 0.0f) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the logistic_noise
curandState* cstates = reinterpret_cast<curandState*>(states);
if (incx == 1 && incy == 1) {
if (alpha == 1.0f) {
logistic_noise_kernel1_alpha1<float><<<gridSize, blockSize>>>(cstates, n, x, y);
} else {
logistic_noise_kernel1<float><<<gridSize, blockSize>>>(cstates, n, alpha, x, y);
}
} else {
if (alpha == 1.0f) {
logistic_noise_kernel_alpha1<float><<<gridSize, blockSize>>>(cstates, n, x, incx, y, incy);
} else {
logistic_noise_kernel<float><<<gridSize, blockSize>>>(cstates, n, alpha, x, incx, y, incy);
}
}
}
void egblas_dlogistic_noise_states(size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy, void* states) {
if (alpha == 0.0) {
logistic_noise_kernel0_run(n, y, incy);
return;
}
size_t gridSize = 64;
size_t blockSize = 64;
// Compute the logistic_noise
curandState* cstates = reinterpret_cast<curandState*>(states);
if (incx == 1 && incy == 1) {
if (alpha == 1.0) {
logistic_noise_kernel1_alpha1<double><<<gridSize, blockSize>>>(cstates, n, x, y);
} else {
logistic_noise_kernel1<double><<<gridSize, blockSize>>>(cstates, n, alpha, x, y);
}
} else {
if (alpha == 1.0) {
logistic_noise_kernel_alpha1<double><<<gridSize, blockSize>>>(cstates, n, x, incx, y, incy);
} else {
logistic_noise_kernel<double><<<gridSize, blockSize>>>(cstates, n, alpha, x, incx, y, incy);
}
}
}
|
7fa97687ea92465cb4ad8af690dc913b36d3e853.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************
* # Copyright 2011. Thuy Diem Nguyen & Zejun Zheng
* # Contact: [email protected] or [email protected]
* #
* # GPL 3.0 applies.
* #
* ************************************************/
// Note: don't use_fast_math option
#include "euclidMain.h"
#include "euclidKernel.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
void writeVectorToFile_GPU(thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector, thrust::host_vector< float > h_distVector, string pairFileName, string distFileName, unsigned long long count, int fileId) {
FILE * pairFile, * distFile;
string tempStr;
char buf[1000];
sprintf(buf, "_%d", fileId);
tempStr = pairFileName;
tempStr.append(buf);
pairFile = fopen(tempStr.c_str(), "wb");
if (pairFile == NULL){
printf("cannot open pairFile: %s\n", tempStr.c_str());
exit(-1);
}
tempStr = distFileName;
tempStr.append(buf);
distFile = fopen(tempStr.c_str(), "wb");
if (distFile == NULL){
printf("cannot open distFile: %s\n", tempStr.c_str());
exit(-1);
}
thrust::device_vector<float> d_distVector = h_distVector;
thrust::device_vector< thrust::pair<unsigned int, unsigned int> > d_pairVector = h_pairVector;
thrust::sort_by_key(d_distVector.begin(), d_distVector.end(), d_pairVector.begin());
thrust::copy(d_distVector.begin(), d_distVector.end(), h_distVector.begin());
thrust::copy(d_pairVector.begin(), d_pairVector.end(), h_pairVector.begin());
int pairArray[BUF_SIZE*2];
float distArray[BUF_SIZE];
int h = 0;
thrust::pair<unsigned int, unsigned int> aPair;
//cout << "write to : " << tempStr << " " << count << " pairs" << endl;
for (unsigned int i = 0; i < count; ++i)
{
aPair = h_pairVector[i];
distArray[h] = h_distVector[i];
pairArray[h*2] = aPair.first;
pairArray[h*2+1] = aPair.second;
++h;
if (h == BUF_SIZE) {
fwrite(pairArray, sizeof(unsigned int), BUF_SIZE * 2, pairFile);
fwrite(distArray, sizeof(float), BUF_SIZE, distFile);
h = 0;
}
}
if (h > 0) {
fwrite(pairArray, sizeof(unsigned int), h * 2, pairFile);
fwrite(distArray, sizeof(float), h, distFile);
h = 0;
}
fclose(pairFile);
fclose(distFile);
}
void writeToVector(thrust::host_vector< thrust::pair<unsigned int, unsigned int> > & h_pairVector, thrust::host_vector< float > & h_distVector, float *h_out, int stageX, int stageY, int arrayDim, float threshold, unsigned long long & count) {
int i, row, col, rowOffset, colOffset;
float dist;
int arraySize = arrayDim * arrayDim;
rowOffset = stageX * arrayDim;
colOffset = stageY * arrayDim;
// write result to output file
for (i = 0; i < arraySize; ++i)
{
row = rowOffset + (int)i / arrayDim;
col = colOffset + (int)i % arrayDim;
dist = h_out[i];
if (dist < threshold || fabs(dist-threshold) < EPSILON)
{
h_pairVector[count] = thrust::make_pair(row, col);
h_distVector[count] = dist;
++count;
}
}
}
void computeEuclidDist_CUDA(float ** eReads, string pairFileName, string distFileName, int numReads, int numSeeds, float threshold, int arrayDim) {
int i, j, stageX, stageY, row, offset, stageId;
unsigned long long totalNumPairs = 0, count = 0;
int fileId = 0;
int size = arrayDim * arrayDim;
int arraySize = size * NUM_STREAMS;
int gridSize = (arrayDim + BLOCK_DIM - 1)/BLOCK_DIM;
int stageDim = (numReads + arrayDim - 1)/arrayDim;
// determine GRID_DIM and blockSize
dim3 threadsPerBlock(BLOCK_DIM, BLOCK_DIM);
dim3 blocksPerGrid(gridSize, gridSize);
/*
// get number of SMs on this GPU
printf("size: %dx%d, arraySize: %d, stageDim: %dx%d\n", arrayDim, arrayDim, arraySize, stageDim, stageDim);
printf("blockSize: %dx%d, gridSize: %dx%d\n", BLOCK_DIM, BLOCK_DIM, gridSize, gridSize);
*/
// declare host variables
float *h_in;
float *d_in;
checkCudaErrors( hipMalloc((void**)&d_in, numReads * numSeeds * sizeof(float)) );
checkCudaErrors( hipHostMalloc((void**)&h_in, numReads * numSeeds * sizeof(float)) );
for (i = 0; i < numReads; ++i)
{
row = i * numSeeds;
for (j = 0; j < numSeeds; ++j)
h_in[row + j] = eReads[i][j];
}
/*
float *mean, *variance, diff;
mean = (float*) malloc(numSeeds * sizeof(float));
variance = (float*) malloc(numSeeds * sizeof(float));
for (j = 0; j < numSeeds; ++j) {
mean[j] = 0.0f;
variance[j] = 0.0f;
}
for (i = 0; i < numReads; ++i)
for (j = 0; j < numSeeds; ++j)
mean[j] += eReads[i][j];
for (j = 0; j < numSeeds; ++j)
mean[j] /= numSeeds;
for (i = 0; i < numReads; ++i)
for (j = 0; j < numSeeds; ++j)
{
diff = eReads[i][j]-mean[j];
variance[j] += diff * diff;
}
for (j = 0; j < numSeeds; ++j)
variance[j] /= numReads;
float *d_var;
checkCudaErrors( hipMalloc((void**)&d_var, numSeeds * sizeof(float)) );
checkCudaErrors( hipMemcpyAsync(d_var, variance, numSeeds * sizeof(float), hipMemcpyHostToDevice) );
free(mean);
free(variance);
*/
for (i = 0; i < numReads; ++i)
free(eReads[i]);
free(eReads);
checkCudaErrors( hipMemcpyAsync(d_in, h_in, numReads * numSeeds * sizeof(float), hipMemcpyHostToDevice) );
// declare device variables
float *d_out;
float *h_out;
checkCudaErrors( hipMalloc((void**)&d_out, arraySize * sizeof(float)) );
checkCudaErrors( hipHostMalloc((void**)&h_out, arraySize * sizeof(float)) );
hipStream_t streams[NUM_STREAMS];
for (i = 0; i < NUM_STREAMS; ++i)
checkCudaErrors( hipStreamCreate(&streams[i]) );
thrust::host_vector< float > h_distVector (MAX_NUM_PAIRS_GPU * 2);
thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector (MAX_NUM_PAIRS_GPU * 2);
int stageSize = stageDim * (stageDim + 1) / 2;
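	// stageSize is the number of arrayDim x arrayDim tiles in the upper triangle (diagonal
	// included) of the numReads x numReads distance matrix; Trag_reverse_eq maps a linear
	// stage id back to its (stageX, stageY) tile, and tiles are processed NUM_STREAMS at a time.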
for (j = 0; j < stageSize; j += NUM_STREAMS)
{
for (i = 0; i < NUM_STREAMS; ++i) {
offset = i * size;
stageId = i + j;
if (stageId < stageSize) {
Trag_reverse_eq(stageId, stageDim, stageX, stageY);
//launchEuclidKernel(streams[i], blocksPerGrid, threadsPerBlock, d_in, d_out+offset, numReads, numSeeds, stageX, stageY, arrayDim, d_var);
launchEuclidKernel(streams[i], blocksPerGrid, threadsPerBlock, d_in, d_out+offset, numReads, numSeeds, stageX, stageY, arrayDim);
checkCudaErrors( hipMemcpyAsync(h_out+offset, d_out+offset, size * sizeof(float), hipMemcpyDeviceToHost, streams[i]) );
}
}
hipDeviceSynchronize();
for (i = 0; i < NUM_STREAMS; ++i) {
offset = i * size;
stageId = i + j;
if (stageId < stageSize) {
Trag_reverse_eq(stageId, stageDim, stageX, stageY);
writeToVector(h_pairVector, h_distVector, h_out+offset, stageX, stageY, arrayDim, threshold, count);
}
}
if (count >= MAX_NUM_PAIRS_GPU)
{
h_pairVector.resize(count);
h_distVector.resize(count);
writeVectorToFile_GPU(h_pairVector, h_distVector, pairFileName, distFileName, count, fileId);
h_pairVector.resize(MAX_NUM_PAIRS_GPU * 2);
h_distVector.resize(MAX_NUM_PAIRS_GPU * 2);
++ fileId;
totalNumPairs += count;
count = 0;
}
}
if (count > 0)
{
h_pairVector.resize(count);
h_distVector.resize(count);
writeVectorToFile_GPU(h_pairVector, h_distVector, pairFileName, distFileName, count, fileId);
totalNumPairs += count;
}
for (i = 0; i < NUM_STREAMS; ++i)
checkCudaErrors( hipStreamDestroy(streams[i]) );
//checkCudaErrors( hipFree(d_var) );
// clean up host variables
checkCudaErrors( hipHostFree(h_out) );
checkCudaErrors( hipFree(d_out) );
checkCudaErrors( hipHostFree(h_in) );
checkCudaErrors( hipFree(d_in) );
//printf("totalNumPairs: %llu\n", totalNumPairs);
printf("%llu\n", totalNumPairs);
}
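// Illustrative call site (not part of this tool): computeEuclidDist_CUDA takes ownership of
// eReads and frees every row, so each row must come from malloc/calloc, e.g.
//
//     float** eReads = (float**)malloc(numReads * sizeof(float*));
//     for (int i = 0; i < numReads; ++i)
//         eReads[i] = (float*)calloc(numSeeds, sizeof(float));
//     computeEuclidDist_CUDA(eReads, "pairs.bin", "dists.bin", numReads, numSeeds, 0.03f, 1024);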
| 7fa97687ea92465cb4ad8af690dc913b36d3e853.cu | /***********************************************
* # Copyright 2011. Thuy Diem Nguyen & Zejun Zheng
* # Contact: [email protected] or [email protected]
* #
* # GPL 3.0 applies.
* #
* ************************************************/
// Note: don't use_fast_math option
#include "euclidMain.h"
#include "euclidKernel.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
void writeVectorToFile_GPU(thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector, thrust::host_vector< float > h_distVector, string pairFileName, string distFileName, unsigned long long count, int fileId) {
FILE * pairFile, * distFile;
string tempStr;
char buf[1000];
sprintf(buf, "_%d", fileId);
tempStr = pairFileName;
tempStr.append(buf);
pairFile = fopen(tempStr.c_str(), "wb");
if (pairFile == NULL){
printf("cannot open pairFile: %s\n", tempStr.c_str());
exit(-1);
}
tempStr = distFileName;
tempStr.append(buf);
distFile = fopen(tempStr.c_str(), "wb");
if (distFile == NULL){
printf("cannot open distFile: %s\n", tempStr.c_str());
exit(-1);
}
thrust::device_vector<float> d_distVector = h_distVector;
thrust::device_vector< thrust::pair<unsigned int, unsigned int> > d_pairVector = h_pairVector;
thrust::sort_by_key(d_distVector.begin(), d_distVector.end(), d_pairVector.begin());
thrust::copy(d_distVector.begin(), d_distVector.end(), h_distVector.begin());
thrust::copy(d_pairVector.begin(), d_pairVector.end(), h_pairVector.begin());
int pairArray[BUF_SIZE*2];
float distArray[BUF_SIZE];
int h = 0;
thrust::pair<unsigned int, unsigned int> aPair;
//cout << "write to : " << tempStr << " " << count << " pairs" << endl;
for (unsigned int i = 0; i < count; ++i)
{
aPair = h_pairVector[i];
distArray[h] = h_distVector[i];
pairArray[h*2] = aPair.first;
pairArray[h*2+1] = aPair.second;
++h;
if (h == BUF_SIZE) {
fwrite(pairArray, sizeof(unsigned int), BUF_SIZE * 2, pairFile);
fwrite(distArray, sizeof(float), BUF_SIZE, distFile);
h = 0;
}
}
if (h > 0) {
fwrite(pairArray, sizeof(unsigned int), h * 2, pairFile);
fwrite(distArray, sizeof(float), h, distFile);
h = 0;
}
fclose(pairFile);
fclose(distFile);
}
void writeToVector(thrust::host_vector< thrust::pair<unsigned int, unsigned int> > & h_pairVector, thrust::host_vector< float > & h_distVector, float *h_out, int stageX, int stageY, int arrayDim, float threshold, unsigned long long & count) {
int i, row, col, rowOffset, colOffset;
float dist;
int arraySize = arrayDim * arrayDim;
rowOffset = stageX * arrayDim;
colOffset = stageY * arrayDim;
// write result to output file
for (i = 0; i < arraySize; ++i)
{
row = rowOffset + (int)i / arrayDim;
col = colOffset + (int)i % arrayDim;
dist = h_out[i];
if (dist < threshold || fabs(dist-threshold) < EPSILON)
{
h_pairVector[count] = thrust::make_pair(row, col);
h_distVector[count] = dist;
++count;
}
}
}
void computeEuclidDist_CUDA(float ** eReads, string pairFileName, string distFileName, int numReads, int numSeeds, float threshold, int arrayDim) {
int i, j, stageX, stageY, row, offset, stageId;
unsigned long long totalNumPairs = 0, count = 0;
int fileId = 0;
int size = arrayDim * arrayDim;
int arraySize = size * NUM_STREAMS;
int gridSize = (arrayDim + BLOCK_DIM - 1)/BLOCK_DIM;
int stageDim = (numReads + arrayDim - 1)/arrayDim;
// determine GRID_DIM and blockSize
dim3 threadsPerBlock(BLOCK_DIM, BLOCK_DIM);
dim3 blocksPerGrid(gridSize, gridSize);
/*
// get number of SMs on this GPU
printf("size: %dx%d, arraySize: %d, stageDim: %dx%d\n", arrayDim, arrayDim, arraySize, stageDim, stageDim);
printf("blockSize: %dx%d, gridSize: %dx%d\n", BLOCK_DIM, BLOCK_DIM, gridSize, gridSize);
*/
// declare host variables
float *h_in;
float *d_in;
checkCudaErrors( cudaMalloc((void**)&d_in, numReads * numSeeds * sizeof(float)) );
checkCudaErrors( cudaMallocHost((void**)&h_in, numReads * numSeeds * sizeof(float)) );
for (i = 0; i < numReads; ++i)
{
row = i * numSeeds;
for (j = 0; j < numSeeds; ++j)
h_in[row + j] = eReads[i][j];
}
/*
float *mean, *variance, diff;
mean = (float*) malloc(numSeeds * sizeof(float));
variance = (float*) malloc(numSeeds * sizeof(float));
for (j = 0; j < numSeeds; ++j) {
mean[j] = 0.0f;
variance[j] = 0.0f;
}
for (i = 0; i < numReads; ++i)
for (j = 0; j < numSeeds; ++j)
mean[j] += eReads[i][j];
for (j = 0; j < numSeeds; ++j)
mean[j] /= numSeeds;
for (i = 0; i < numReads; ++i)
for (j = 0; j < numSeeds; ++j)
{
diff = eReads[i][j]-mean[j];
variance[j] += diff * diff;
}
for (j = 0; j < numSeeds; ++j)
variance[j] /= numReads;
float *d_var;
checkCudaErrors( cudaMalloc((void**)&d_var, numSeeds * sizeof(float)) );
checkCudaErrors( cudaMemcpyAsync(d_var, variance, numSeeds * sizeof(float), cudaMemcpyHostToDevice) );
free(mean);
free(variance);
*/
for (i = 0; i < numReads; ++i)
free(eReads[i]);
free(eReads);
checkCudaErrors( cudaMemcpyAsync(d_in, h_in, numReads * numSeeds * sizeof(float), cudaMemcpyHostToDevice) );
// declare device variables
float *d_out;
float *h_out;
checkCudaErrors( cudaMalloc((void**)&d_out, arraySize * sizeof(float)) );
checkCudaErrors( cudaMallocHost((void**)&h_out, arraySize * sizeof(float)) );
cudaStream_t streams[NUM_STREAMS];
for (i = 0; i < NUM_STREAMS; ++i)
checkCudaErrors( cudaStreamCreate(&streams[i]) );
thrust::host_vector< float > h_distVector (MAX_NUM_PAIRS_GPU * 2);
thrust::host_vector< thrust::pair<unsigned int, unsigned int> > h_pairVector (MAX_NUM_PAIRS_GPU * 2);
int stageSize = stageDim * (stageDim + 1) / 2;
for (j = 0; j < stageSize; j += NUM_STREAMS)
{
for (i = 0; i < NUM_STREAMS; ++i) {
offset = i * size;
stageId = i + j;
if (stageId < stageSize) {
Trag_reverse_eq(stageId, stageDim, stageX, stageY);
//launchEuclidKernel(streams[i], blocksPerGrid, threadsPerBlock, d_in, d_out+offset, numReads, numSeeds, stageX, stageY, arrayDim, d_var);
launchEuclidKernel(streams[i], blocksPerGrid, threadsPerBlock, d_in, d_out+offset, numReads, numSeeds, stageX, stageY, arrayDim);
checkCudaErrors( cudaMemcpyAsync(h_out+offset, d_out+offset, size * sizeof(float), cudaMemcpyDeviceToHost, streams[i]) );
}
}
cudaDeviceSynchronize();
for (i = 0; i < NUM_STREAMS; ++i) {
offset = i * size;
stageId = i + j;
if (stageId < stageSize) {
Trag_reverse_eq(stageId, stageDim, stageX, stageY);
writeToVector(h_pairVector, h_distVector, h_out+offset, stageX, stageY, arrayDim, threshold, count);
}
}
if (count >= MAX_NUM_PAIRS_GPU)
{
h_pairVector.resize(count);
h_distVector.resize(count);
writeVectorToFile_GPU(h_pairVector, h_distVector, pairFileName, distFileName, count, fileId);
h_pairVector.resize(MAX_NUM_PAIRS_GPU * 2);
h_distVector.resize(MAX_NUM_PAIRS_GPU * 2);
++ fileId;
totalNumPairs += count;
count = 0;
}
}
if (count > 0)
{
h_pairVector.resize(count);
h_distVector.resize(count);
writeVectorToFile_GPU(h_pairVector, h_distVector, pairFileName, distFileName, count, fileId);
totalNumPairs += count;
}
for (i = 0; i < NUM_STREAMS; ++i)
checkCudaErrors( cudaStreamDestroy(streams[i]) );
//checkCudaErrors( cudaFree(d_var) );
// clean up host variables
checkCudaErrors( cudaFreeHost(h_out) );
checkCudaErrors( cudaFree(d_out) );
checkCudaErrors( cudaFreeHost(h_in) );
checkCudaErrors( cudaFree(d_in) );
//printf("totalNumPairs: %llu\n", totalNumPairs);
printf("%llu\n", totalNumPairs);
}
|
9ad28915430b50c6cc16b8f55037bfe214fde63a.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include "cutil_inline.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
__global__ void pathcalc(float *d_z, float *d_v)
{
float s1, s2, y1, y2, payoff;
// move array pointers to correct position
// version 1
//d_z = d_z + threadIdx.x + 2 * N*blockIdx.x*blockDim.x;
// version 2
d_z = d_z + 2 * N * threadIdx.x + 2 * N * blockIdx.x * blockDim.x;
d_v = d_v + threadIdx.x + blockIdx.x*blockDim.x;
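	// With this layout (version 2) each thread owns a contiguous block of 2*N normals, two per
	// time step; the commented-out version 1 instead interleaves them across the thread block.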
// path calculation
s1 = 1.0f;
s2 = 1.0f;
for (int n = 0; n < N; n++) {
y1 = (*d_z);
// version 1
//d_z += blockDim.x; // shift pointer to next element
// version 2
d_z += 1;
y2 = rho*y1 + alpha*(*d_z);
// version 1
//d_z += blockDim.x; // shift pointer to next element
// version 2
d_z += 1;
s1 = s1*(con1 + con2*y1);
s2 = s2*(con1 + con2*y2);
}
// put payoff value into device array
payoff = 0.0f;
if (fabs(s1 - 1.0f) < 0.1f && fabs(s2 - 1.0f) < 0.1f) payoff = exp(-r*T);
*d_v = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main_monte(int argc, char* argv[]) {
int NPATH = 960000, h_N = 100;
float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
float *h_v, *d_v, *d_z;
double sum1, sum2;
//double timer, elapsed;
clock_t timer; // for counting the CPU time
double elapsed; // elapsed time
hiprandGenerator_t gen;
// initialise card
cutilDeviceInit(argc, argv);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
cudaSafeCall(hipMalloc((void **)&d_v, sizeof(float)*NPATH));
cudaSafeCall(hipMalloc((void **)&d_z, sizeof(float)* 2 * h_N*NPATH));
// define constants and transfer to GPU
h_T = 1.0f;
h_r = 0.05f;
h_sigma = 0.1f;
h_rho = 0.5f;
h_alpha = sqrt(1.0f - h_rho*h_rho);
h_dt = 1.0f / h_N;
h_con1 = 1.0f + h_r*h_dt;
h_con2 = sqrt(h_dt)*h_sigma;
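	// The path loop applies the Euler discretisation
	//   s_{n+1} = s_n * (1 + r*dt + sigma*sqrt(dt)*y_n) = s_n * (con1 + con2*y_n)
	// with y_n standard normal; the second asset uses y2 = rho*y1 + sqrt(1 - rho^2)*z so that
	// corr(y1, y2) = rho.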
cudaSafeCall(hipMemcpyToSymbol(N, &h_N, sizeof(h_N)));
cudaSafeCall(hipMemcpyToSymbol(T, &h_T, sizeof(h_T)));
cudaSafeCall(hipMemcpyToSymbol(r, &h_r, sizeof(h_r)));
cudaSafeCall(hipMemcpyToSymbol(sigma, &h_sigma, sizeof(h_sigma)));
cudaSafeCall(hipMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)));
cudaSafeCall(hipMemcpyToSymbol(alpha, &h_alpha, sizeof(h_alpha)));
cudaSafeCall(hipMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)));
cudaSafeCall(hipMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)));
cudaSafeCall(hipMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)));
// random number generation
timer = clock(); // initialise timer
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
hiprandGenerateNormal(gen, d_z, 2 * h_N*NPATH, 0.0f, 1.0f);
cudaSafeCall(hipDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("\nCURAND normal RNG execution time (ms): %f , samples/sec: %e \n",
elapsed, 2.0*h_N*NPATH / elapsed);
// execute kernel and time it
pathcalc << <NPATH / 64, 64 >> >(d_z, d_v);
cudaCheckMsg("pathcalc execution failed\n");
cudaSafeCall(hipDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("Monte Carlo kernel execution time (ms): %f \n", elapsed);
// copy back results
cudaSafeCall(hipMemcpy(h_v, d_v, sizeof(float)*NPATH,
hipMemcpyDeviceToHost));
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i = 0; i < NPATH; i++) {
sum1 += h_v[i];
//printf("%f\n", h_v[i]);
sum2 += h_v[i] * h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1 / NPATH, sqrt((sum2 / NPATH - (sum1 / NPATH)*(sum1 / NPATH)) / NPATH));
// Tidy up library
hiprandDestroyGenerator(gen);
// Release memory and exit cleanly
free(h_v);
cudaSafeCall(hipFree(d_v));
cudaSafeCall(hipFree(d_z));
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
system("pause");
return 0;
} | 9ad28915430b50c6cc16b8f55037bfe214fde63a.cu | ////////////////////////////////////////////////////////////////////////
// GPU version of Monte Carlo algorithm using NVIDIA's CURAND library
////////////////////////////////////////////////////////////////////////
#include "cutil_inline.h"
#include <cuda.h>
#include <curand.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
////////////////////////////////////////////////////////////////////////
// CUDA global constants
////////////////////////////////////////////////////////////////////////
__constant__ int N;
__constant__ float T, r, sigma, rho, alpha, dt, con1, con2;
////////////////////////////////////////////////////////////////////////
// kernel routine
////////////////////////////////////////////////////////////////////////
__global__ void pathcalc(float *d_z, float *d_v)
{
float s1, s2, y1, y2, payoff;
// move array pointers to correct position
// version 1
//d_z = d_z + threadIdx.x + 2 * N*blockIdx.x*blockDim.x;
// version 2
d_z = d_z + 2 * N * threadIdx.x + 2 * N * blockIdx.x * blockDim.x;
d_v = d_v + threadIdx.x + blockIdx.x*blockDim.x;
// path calculation
s1 = 1.0f;
s2 = 1.0f;
for (int n = 0; n < N; n++) {
y1 = (*d_z);
// version 1
//d_z += blockDim.x; // shift pointer to next element
// version 2
d_z += 1;
y2 = rho*y1 + alpha*(*d_z);
// version 1
//d_z += blockDim.x; // shift pointer to next element
// version 2
d_z += 1;
s1 = s1*(con1 + con2*y1);
s2 = s2*(con1 + con2*y2);
}
// put payoff value into device array
payoff = 0.0f;
if (fabs(s1 - 1.0f) < 0.1f && fabs(s2 - 1.0f) < 0.1f) payoff = exp(-r*T);
*d_v = payoff;
}
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main_monte(int argc, char* argv[]) {
int NPATH = 960000, h_N = 100;
float h_T, h_r, h_sigma, h_rho, h_alpha, h_dt, h_con1, h_con2;
float *h_v, *d_v, *d_z;
double sum1, sum2;
//double timer, elapsed;
clock_t timer; // for counting the CPU time
double elapsed; // elapsed time
curandGenerator_t gen;
// initialise card
cutilDeviceInit(argc, argv);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
cudaSafeCall(cudaMalloc((void **)&d_v, sizeof(float)*NPATH));
cudaSafeCall(cudaMalloc((void **)&d_z, sizeof(float)* 2 * h_N*NPATH));
// define constants and transfer to GPU
h_T = 1.0f;
h_r = 0.05f;
h_sigma = 0.1f;
h_rho = 0.5f;
h_alpha = sqrt(1.0f - h_rho*h_rho);
h_dt = 1.0f / h_N;
h_con1 = 1.0f + h_r*h_dt;
h_con2 = sqrt(h_dt)*h_sigma;
cudaSafeCall(cudaMemcpyToSymbol(N, &h_N, sizeof(h_N)));
cudaSafeCall(cudaMemcpyToSymbol(T, &h_T, sizeof(h_T)));
cudaSafeCall(cudaMemcpyToSymbol(r, &h_r, sizeof(h_r)));
cudaSafeCall(cudaMemcpyToSymbol(sigma, &h_sigma, sizeof(h_sigma)));
cudaSafeCall(cudaMemcpyToSymbol(rho, &h_rho, sizeof(h_rho)));
cudaSafeCall(cudaMemcpyToSymbol(alpha, &h_alpha, sizeof(h_alpha)));
cudaSafeCall(cudaMemcpyToSymbol(dt, &h_dt, sizeof(h_dt)));
cudaSafeCall(cudaMemcpyToSymbol(con1, &h_con1, sizeof(h_con1)));
cudaSafeCall(cudaMemcpyToSymbol(con2, &h_con2, sizeof(h_con2)));
// random number generation
timer = clock(); // initialise timer
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
curandGenerateNormal(gen, d_z, 2 * h_N*NPATH, 0.0f, 1.0f);
cudaSafeCall(cudaDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("\nCURAND normal RNG execution time (ms): %f , samples/sec: %e \n",
elapsed, 2.0*h_N*NPATH / elapsed);
// execute kernel and time it
pathcalc << <NPATH / 64, 64 >> >(d_z, d_v);
cudaCheckMsg("pathcalc execution failed\n");
cudaSafeCall(cudaDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("Monte Carlo kernel execution time (ms): %f \n", elapsed);
// copy back results
cudaSafeCall(cudaMemcpy(h_v, d_v, sizeof(float)*NPATH,
cudaMemcpyDeviceToHost));
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i = 0; i < NPATH; i++) {
sum1 += h_v[i];
//printf("%f\n", h_v[i]);
sum2 += h_v[i] * h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1 / NPATH, sqrt((sum2 / NPATH - (sum1 / NPATH)*(sum1 / NPATH)) / NPATH));
// Tidy up library
curandDestroyGenerator(gen);
// Release memory and exit cleanly
free(h_v);
cudaSafeCall(cudaFree(d_v));
cudaSafeCall(cudaFree(d_z));
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
system("pause");
return 0;
} |
4d4ed44c5b95ebe3092ae227f65e964e0284861b.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
struct ELUupdateOutput_functor
{
const float alpha_;
ELUupdateOutput_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *output, const float *input) const
{
*output = *input <= 0 ? (exp(*input) - 1) * alpha_ : *input;
}
};
// in-place variant
struct ELUupdateOutputIP_functor
{
const float alpha_;
ELUupdateOutputIP_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *x) const
{
*x = *x <= 0 ? (exp(*x) - 1) * alpha_ : *x;
}
};
void THNN_CudaELU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
float alpha, bool inplace)
{
THCUNN_assertSameGPU(state, 2, input, output);
if (inplace)
{
THCudaTensor_pointwiseApply1(state, input, ELUupdateOutputIP_functor(alpha));
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, ELUupdateOutput_functor(alpha));
}
}
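// For input <= 0 the forward pass stores output = alpha * (exp(input) - 1), so the derivative
// alpha * exp(input) can be recovered as (output + alpha); the gradient functors below rely on
// this identity instead of re-evaluating exp.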
struct ELUupdateGradInput_functor
{
const float alpha_;
ELUupdateGradInput_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *gradInput, const float *output, const float *gradOutput) const
{
*gradInput = (*output) <= 0 ? (*gradOutput * (*output + alpha_)) : (*gradOutput);
}
};
struct ELUupdateGradInputIP_functor
{
const float alpha_;
ELUupdateGradInputIP_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *gradOutput, const float *output) const
{
*gradOutput = (*output) <= 0 ? (*gradOutput * (*output + alpha_)) : (*gradOutput);
}
};
void THNN_CudaELU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *output, float alpha, bool inplace)
{
THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput);
if (inplace)
{
THCudaTensor_pointwiseApply2(state, gradOutput, output, ELUupdateGradInputIP_functor(alpha));
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, output);
THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, ELUupdateGradInput_functor(alpha));
}
}
| 4d4ed44c5b95ebe3092ae227f65e964e0284861b.cu | #include "THCUNN.h"
#include "common.h"
struct ELUupdateOutput_functor
{
const float alpha_;
ELUupdateOutput_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *output, const float *input) const
{
*output = *input <= 0 ? (exp(*input) - 1) * alpha_ : *input;
}
};
// in-place variant
struct ELUupdateOutputIP_functor
{
const float alpha_;
ELUupdateOutputIP_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *x) const
{
*x = *x <= 0 ? (exp(*x) - 1) * alpha_ : *x;
}
};
void THNN_CudaELU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
float alpha, bool inplace)
{
THCUNN_assertSameGPU(state, 2, input, output);
if (inplace)
{
THCudaTensor_pointwiseApply1(state, input, ELUupdateOutputIP_functor(alpha));
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, ELUupdateOutput_functor(alpha));
}
}
struct ELUupdateGradInput_functor
{
const float alpha_;
ELUupdateGradInput_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *gradInput, const float *output, const float *gradOutput) const
{
*gradInput = (*output) <= 0 ? (*gradOutput * (*output + alpha_)) : (*gradOutput);
}
};
struct ELUupdateGradInputIP_functor
{
const float alpha_;
ELUupdateGradInputIP_functor(float alpha)
: alpha_(alpha)
{}
__device__ void operator()(float *gradOutput, const float *output) const
{
*gradOutput = (*output) <= 0 ? (*gradOutput * (*output + alpha_)) : (*gradOutput);
}
};
void THNN_CudaELU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *output, float alpha, bool inplace)
{
THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput);
if (inplace)
{
THCudaTensor_pointwiseApply2(state, gradOutput, output, ELUupdateGradInputIP_functor(alpha));
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, output);
THCudaTensor_pointwiseApply3(state, gradInput, output, gradOutput, ELUupdateGradInput_functor(alpha));
}
}
|
9a90c6a981a5a7bd35ebc157bad995aec2e8ae3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
extern "C" __device__ void _Z12julia_matmul3PtrI7Float64ES_IS0_ES_IS0_E5Int32(double* out, double* A, double* B, int dim);
__global__ void __matmul(double* out, double* A, double* B, int dim) {
_Z12julia_matmul3PtrI7Float64ES_IS0_ES_IS0_E5Int32(out, A, B, dim);
}
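// __matmul is a thin __global__ wrapper around the mangled extern symbol above, which is the
// device function emitted by the Julia GPU compiler (it appears to demangle to roughly
// julia_matmul(Ptr<Float64>, Ptr<Float64>, Ptr<Float64>, Int32)).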
int main(int argc, char *argv[]) {
int dim = 1024;
if (argc >= 2) {
dim = atoi(argv[1]);
}
printf("dim: %i\n", dim);
double* A = (double*)malloc(sizeof(double) * dim * dim);
  A[1] = 0xfffffff;
double* B = (double*)malloc(sizeof(double) * dim * dim);
B[1] = 0xfffffff;
double* output = (double*)malloc(sizeof(double) * dim * dim);
printf("start, out1: %f, a1: %f, b1: %f\n", output[1], A[1], B[1]);
double *d_A, *d_B, *d_output;
hipMalloc((void **)&d_A, sizeof(double) * dim * dim);
hipMalloc((void **)&d_B, sizeof(double) * dim * dim);
hipMalloc((void **)&d_output, sizeof(double) * dim * dim);
  hipMemcpy(d_A, A, sizeof(double) * dim * dim, hipMemcpyHostToDevice);
  hipMemcpy(d_B, B, sizeof(double) * dim * dim, hipMemcpyHostToDevice);
int tileDim = 32;
int numBlocks = dim/32;
dim3 threadsPerBlock(tileDim, tileDim);
  hipLaunchKernelGGL(( __matmul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_output, d_A, d_B, dim);  // launch on the device copies, not the host buffers
hipDeviceSynchronize();
  hipMemcpy(output, d_output, sizeof(double) * dim * dim, hipMemcpyDeviceToHost);
printf("done, out1: %f\n", output[1]);
hipFree(d_A);
hipFree(d_B);
hipFree(d_output);
free(A);
free(B);
free(output);
}
| 9a90c6a981a5a7bd35ebc157bad995aec2e8ae3a.cu | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" __device__ void _Z12julia_matmul3PtrI7Float64ES_IS0_ES_IS0_E5Int32(double* out, double* A, double* B, int dim);
__global__ void __matmul(double* out, double* A, double* B, int dim) {
_Z12julia_matmul3PtrI7Float64ES_IS0_ES_IS0_E5Int32(out, A, B, dim);
}
int main(int argc, char *argv[]) {
int dim = 1024;
if (argc >= 2) {
dim = atoi(argv[1]);
}
printf("dim: %i\n", dim);
double* A = (double*)malloc(sizeof(double) * dim * dim);
  A[1] = 0xfffffff;
double* B = (double*)malloc(sizeof(double) * dim * dim);
B[1] = 0xfffffff;
double* output = (double*)malloc(sizeof(double) * dim * dim);
printf("start, out1: %f, a1: %f, b1: %f\n", output[1], A[1], B[1]);
double *d_A, *d_B, *d_output;
cudaMalloc((void **)&d_A, sizeof(double) * dim * dim);
cudaMalloc((void **)&d_B, sizeof(double) * dim * dim);
cudaMalloc((void **)&d_output, sizeof(double) * dim * dim);
  cudaMemcpy(d_A, A, sizeof(double) * dim * dim, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, B, sizeof(double) * dim * dim, cudaMemcpyHostToDevice);
int tileDim = 32;
int numBlocks = dim/32;
dim3 threadsPerBlock(tileDim, tileDim);
  __matmul<<<numBlocks, threadsPerBlock>>>(d_output, d_A, d_B, dim);  // launch on the device copies, not the host buffers
cudaDeviceSynchronize();
  cudaMemcpy(output, d_output, sizeof(double) * dim * dim, cudaMemcpyDeviceToHost);
printf("done, out1: %f\n", output[1]);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_output);
free(A);
free(B);
free(output);
}
|
4de32a1fac74283f146b0dcb5b9da2ecf4d59734.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/mean_iou_op.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void CountCUDAKernel(const int num_classes,
const int count,
const T* predictions,
const T* labels,
int* wrong,
int* correct) {
extern __shared__ int blcok_cache[];
int* wrong_c = blcok_cache;
int* correct_c = blcok_cache + num_classes;
// init cache
for (int i = threadIdx.x; i < num_classes * 2; i += blockDim.x) {
blcok_cache[i] = 0;
}
__syncthreads();
T pred;
T label;
CUDA_KERNEL_LOOP(i, count) {
pred = predictions[i];
label = labels[i];
if (pred == label) {
atomicAdd(correct_c + pred, 1);
} else {
atomicAdd(wrong_c + pred, 1);
atomicAdd(wrong_c + label, 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < num_classes; i += blockDim.x) {
atomicAdd(wrong + i, wrong_c[i]);
atomicAdd(correct + i, correct_c[i]);
}
}
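// Per class i, correct[i] accumulates true positives and wrong[i] accumulates false positives
// plus false negatives, so the kernel below computes IoU_i = TP / (TP + FP + FN) and averages
// it over the classes whose denominator is non-zero (valid_count_c).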
__global__ void ComputeIoUCUDAKernel(
const int num_classes, int* wrong, int* correct, float* ious, float* iou) {
__shared__ int valid_count_c;
if (threadIdx.x == 0) {
valid_count_c = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(i, num_classes) {
int wrong_n = wrong[i];
int correct_n = correct[i];
int denominator = wrong_n + correct_n;
if (denominator > 0) {
atomicAdd(&valid_count_c, 1);
ious[i] = static_cast<float>(correct_n) / denominator;
} else {
ious[i] = 0;
}
}
__syncthreads();
if (threadIdx.x == 0) {
float iou_sum = 0;
for (int i = 0; i < num_classes; ++i) {
iou_sum += ious[i];
}
iou[0] += iou_sum / valid_count_c;
}
}
template <typename T>
class MeanIoUCUDAOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto& place = *dev_ctx.eigen_device();
// get input and output tensor
auto* predictions = ctx.Input<phi::DenseTensor>("Predictions");
auto* labels = ctx.Input<phi::DenseTensor>("Labels");
auto* out_mean_iou = ctx.Output<phi::DenseTensor>("OutMeanIou");
auto* out_wrong = ctx.Output<phi::DenseTensor>("OutWrong");
auto* out_correct = ctx.Output<phi::DenseTensor>("OutCorrect");
int num_classes = static_cast<int>(ctx.Attr<int>("num_classes"));
// Get data ptr
const T* predictions_data = predictions->data<T>();
const T* labels_data = labels->data<T>();
int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace());
int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace());
float* out_mean_iou_data =
out_mean_iou->mutable_data<float>(ctx.GetPlace());
// Get Eigen tensor
auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou);
auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong);
auto out_correct_t = EigenTensor<int, 1>::From(*out_correct);
// Temporary memory
auto tmp_ious_data = memory::Alloc(
dev_ctx.GetPlace(),
num_classes * sizeof(float),
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
float* ious_data = static_cast<float*>(tmp_ious_data->ptr());
// Init out_wrong, out_correct and out_mean_iou
out_wrong_t.device(place) = out_wrong_t.constant(0);
out_correct_t.device(place) = out_correct_t.constant(0);
out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f);
    // collect previously accumulated wrong, correct and mean_iou
auto in_mean_ious = ctx.MultiInput<phi::DenseTensor>("InMeanIou");
for (int i = 0; i < in_mean_ious.size(); ++i) {
out_mean_iou_t.device(place) +=
EigenTensor<float, 1>::From(*in_mean_ious[i]);
}
auto in_wrongs = ctx.MultiInput<phi::DenseTensor>("InWrongs");
for (int i = 0; i < in_wrongs.size(); ++i) {
out_wrong_t.device(place) += EigenTensor<int, 1>::From(*in_wrongs[i]);
}
auto in_corrects = ctx.MultiInput<phi::DenseTensor>("InCorrects");
for (int i = 0; i < in_corrects.size(); ++i) {
out_correct_t.device(place) += EigenTensor<int, 1>::From(*in_corrects[i]);
}
// compute
auto stream = ctx.cuda_device_context().stream();
int block = PADDLE_CUDA_NUM_THREADS;
int grid = (predictions->numel() + block - 1) / block;
int cache_size = (num_classes * 2 + 1) * sizeof(int);
hipLaunchKernelGGL(( CountCUDAKernel<T>)
, dim3(grid), dim3(block), cache_size, stream, num_classes,
predictions->numel(),
predictions_data,
labels_data,
out_wrong_data,
out_correct_data);
hipLaunchKernelGGL(( ComputeIoUCUDAKernel), dim3(1), dim3(block), 0, stream, num_classes,
out_wrong_data,
out_correct_data,
ious_data,
out_mean_iou_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(mean_iou,
ops::MeanIoUCUDAOpKernel<int>,
ops::MeanIoUCUDAOpKernel<int64_t>,
ops::MeanIoUCUDAOpKernel<int32_t>);
| 4de32a1fac74283f146b0dcb5b9da2ecf4d59734.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/mean_iou_op.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void CountCUDAKernel(const int num_classes,
const int count,
const T* predictions,
const T* labels,
int* wrong,
int* correct) {
extern __shared__ int blcok_cache[];
int* wrong_c = blcok_cache;
int* correct_c = blcok_cache + num_classes;
// init cache
for (int i = threadIdx.x; i < num_classes * 2; i += blockDim.x) {
blcok_cache[i] = 0;
}
__syncthreads();
T pred;
T label;
CUDA_KERNEL_LOOP(i, count) {
pred = predictions[i];
label = labels[i];
if (pred == label) {
atomicAdd(correct_c + pred, 1);
} else {
atomicAdd(wrong_c + pred, 1);
atomicAdd(wrong_c + label, 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < num_classes; i += blockDim.x) {
atomicAdd(wrong + i, wrong_c[i]);
atomicAdd(correct + i, correct_c[i]);
}
}
__global__ void ComputeIoUCUDAKernel(
const int num_classes, int* wrong, int* correct, float* ious, float* iou) {
__shared__ int valid_count_c;
if (threadIdx.x == 0) {
valid_count_c = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(i, num_classes) {
int wrong_n = wrong[i];
int correct_n = correct[i];
int denominator = wrong_n + correct_n;
if (denominator > 0) {
atomicAdd(&valid_count_c, 1);
ious[i] = static_cast<float>(correct_n) / denominator;
} else {
ious[i] = 0;
}
}
__syncthreads();
if (threadIdx.x == 0) {
float iou_sum = 0;
for (int i = 0; i < num_classes; ++i) {
iou_sum += ious[i];
}
iou[0] += iou_sum / valid_count_c;
}
}
template <typename T>
class MeanIoUCUDAOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto& place = *dev_ctx.eigen_device();
// get input and output tensor
auto* predictions = ctx.Input<phi::DenseTensor>("Predictions");
auto* labels = ctx.Input<phi::DenseTensor>("Labels");
auto* out_mean_iou = ctx.Output<phi::DenseTensor>("OutMeanIou");
auto* out_wrong = ctx.Output<phi::DenseTensor>("OutWrong");
auto* out_correct = ctx.Output<phi::DenseTensor>("OutCorrect");
int num_classes = static_cast<int>(ctx.Attr<int>("num_classes"));
// Get data ptr
const T* predictions_data = predictions->data<T>();
const T* labels_data = labels->data<T>();
int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace());
int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace());
float* out_mean_iou_data =
out_mean_iou->mutable_data<float>(ctx.GetPlace());
// Get Eigen tensor
auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou);
auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong);
auto out_correct_t = EigenTensor<int, 1>::From(*out_correct);
// Temporary memory
auto tmp_ious_data = memory::Alloc(
dev_ctx.GetPlace(),
num_classes * sizeof(float),
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
float* ious_data = static_cast<float*>(tmp_ious_data->ptr());
// Init out_wrong, out_correct and out_mean_iou
out_wrong_t.device(place) = out_wrong_t.constant(0);
out_correct_t.device(place) = out_correct_t.constant(0);
out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f);
// collect pre wrong, correct and mean_iou
auto in_mean_ious = ctx.MultiInput<phi::DenseTensor>("InMeanIou");
for (int i = 0; i < in_mean_ious.size(); ++i) {
out_mean_iou_t.device(place) +=
EigenTensor<float, 1>::From(*in_mean_ious[i]);
}
auto in_wrongs = ctx.MultiInput<phi::DenseTensor>("InWrongs");
for (int i = 0; i < in_wrongs.size(); ++i) {
out_wrong_t.device(place) += EigenTensor<int, 1>::From(*in_wrongs[i]);
}
auto in_corrects = ctx.MultiInput<phi::DenseTensor>("InCorrects");
for (int i = 0; i < in_corrects.size(); ++i) {
out_correct_t.device(place) += EigenTensor<int, 1>::From(*in_corrects[i]);
}
// compute
auto stream = ctx.cuda_device_context().stream();
int block = PADDLE_CUDA_NUM_THREADS;
int grid = (predictions->numel() + block - 1) / block;
int cache_size = (num_classes * 2 + 1) * sizeof(int);
CountCUDAKernel<T>
<<<grid, block, cache_size, stream>>>(num_classes,
predictions->numel(),
predictions_data,
labels_data,
out_wrong_data,
out_correct_data);
ComputeIoUCUDAKernel<<<1, block, 0, stream>>>(num_classes,
out_wrong_data,
out_correct_data,
ious_data,
out_mean_iou_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(mean_iou,
ops::MeanIoUCUDAOpKernel<int>,
ops::MeanIoUCUDAOpKernel<int64_t>,
ops::MeanIoUCUDAOpKernel<int32_t>);
|
e9e4ecb38633d6adb747e19ac01ff78d00822757.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <cstdio>
#ifndef N
#error N must be defined
#endif
#define PREDICATE(x) (((x & 1) == 0) ? 1 : 0)
__global__ void compact(int *out, int*in) {
__shared__ unsigned flag[N];
__shared__ unsigned idx[N];
unsigned t = threadIdx.x;
// (i) test each element with predicate p
// flag = 1 if keeping element
// 0 otherwise
flag[t] = PREDICATE(in[t]);
// (ii) compute indexes for scatter
// using an exclusive prefix sum
__syncthreads();
if (t < N/2) {
idx[2*t] = flag[2*t];
idx[2*t+1] = flag[2*t+1];
}
// (a) upsweep
int offset = 1;
for (unsigned d = N/2; d > 0; d /= 2) {
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
idx[bi] += idx[ai];
}
offset *= 2;
}
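  // After the upsweep, idx[N-1] holds the sum of all flags, i.e. the number of
  // kept elements; it is zeroed below to seed the exclusive (Blelloch) scan.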
// (b) downsweep
if (t == 0) idx[N-1] = 0;
for (unsigned d = 1; d < N; d *= 2) {
offset /= 2;
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
int temp = idx[ai];
idx[ai] = idx[bi];
idx[bi] += temp;
}
}
__syncthreads();
// end of exclusive prefix sum of flag into idx
// (iii) scatter
if (flag[t]) out[idx[t]] = in[t];
}
int main(int argc, char **argv) {
// test data
size_t ArraySize = N * sizeof(int);
int *in = (int *)malloc(ArraySize);
int *out = (int *)malloc(ArraySize);
klee_make_symbolic(in, ArraySize, "in");
// create some memory objects on the device
int *d_in;
int *d_out;
hipMalloc((void **)&d_in, ArraySize);
hipMalloc((void **)&d_out, ArraySize);
// memcpy into these objects
hipMemcpy(d_in, in, ArraySize, hipMemcpyHostToDevice);
// run the kernel
hipLaunchKernelGGL(( compact), dim3(1),dim3(N), 0, 0, d_out, d_in);
// memcpy back the result
hipMemcpy(out, d_out, ArraySize, hipMemcpyDeviceToHost);
// cleanup
free(in);
free(out);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| e9e4ecb38633d6adb747e19ac01ff78d00822757.cu | #include <cassert>
#include <cstdio>
#ifndef N
#error N must be defined
#endif
#define PREDICATE(x) (((x & 1) == 0) ? 1 : 0)
__global__ void compact(int *out, int*in) {
__shared__ unsigned flag[N];
__shared__ unsigned idx[N];
unsigned t = threadIdx.x;
// (i) test each element with predicate p
// flag = 1 if keeping element
// 0 otherwise
flag[t] = PREDICATE(in[t]);
// (ii) compute indexes for scatter
// using an exclusive prefix sum
__syncthreads();
if (t < N/2) {
idx[2*t] = flag[2*t];
idx[2*t+1] = flag[2*t+1];
}
// (a) upsweep
int offset = 1;
for (unsigned d = N/2; d > 0; d /= 2) {
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
idx[bi] += idx[ai];
}
offset *= 2;
}
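  // After the upsweep, idx[N-1] holds the sum of all flags, i.e. the number of
  // kept elements; it is zeroed below to seed the exclusive (Blelloch) scan.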
// (b) downsweep
if (t == 0) idx[N-1] = 0;
for (unsigned d = 1; d < N; d *= 2) {
offset /= 2;
__syncthreads();
if (t < d) {
int ai = offset * (2 * t + 1) - 1;
int bi = offset * (2 * t + 2) - 1;
int temp = idx[ai];
idx[ai] = idx[bi];
idx[bi] += temp;
}
}
__syncthreads();
// end of exclusive prefix sum of flag into idx
// (iii) scatter
if (flag[t]) out[idx[t]] = in[t];
}
int main(int argc, char **argv) {
// test data
size_t ArraySize = N * sizeof(int);
int *in = (int *)malloc(ArraySize);
int *out = (int *)malloc(ArraySize);
klee_make_symbolic(in, ArraySize, "in");
// create some memory objects on the device
int *d_in;
int *d_out;
cudaMalloc((void **)&d_in, ArraySize);
cudaMalloc((void **)&d_out, ArraySize);
// memcpy into these objects
cudaMemcpy(d_in, in, ArraySize, cudaMemcpyHostToDevice);
// run the kernel
compact<<<1,N>>>(d_out, d_in);
// memcpy back the result
cudaMemcpy(out, d_out, ArraySize, cudaMemcpyDeviceToHost);
// cleanup
free(in);
free(out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
27c70084effa57312fd21f7c91b7ba830cd6f512.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* GPU Version */
// original file is https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "timer.h"
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
// Make sure we do not go out of bounds
int i;
for (i = id; i < n; i += stride)
c[i] = a[i] + b[i];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 2000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
fprintf(stderr, "Allocating CPU memory and populating arrays of length %d ...", n);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
fprintf(stderr, " done.\n");
fprintf(stderr, "Performing vector addition (timer started) ...");
StartTimer();
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Copy host vectors to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((double)n/blockSize);
if (gridSize > 65535) gridSize = 32000;
printf("GridSize %d and total_threads %d\n", gridSize, gridSize * blockSize);
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipDeviceSynchronize();
double runtime = GetTimer();
fprintf(stderr, " done in %.2f s.\n", runtime / 1000);
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
double tol = 1e-6;
//printf("\nout is %f\n", sum/n);
//if (fabs(sum/n - 1.0) > tol) printf("Warning: potential numerical problems.\n");
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 27c70084effa57312fd21f7c91b7ba830cd6f512.cu | /* GPU Version */
// original file is https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "timer.h"
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
// Make sure we do not go out of bounds
int i;
for (i = id; i < n; i += stride)
c[i] = a[i] + b[i];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 2000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
fprintf(stderr, "Allocating CPU memory and populating arrays of length %d ...", n);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
fprintf(stderr, " done.\n");
fprintf(stderr, "Performing vector addition (timer started) ...");
StartTimer();
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Copy host vectors to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((double)n/blockSize);
if (gridSize > 65535) gridSize = 32000;
printf("GridSize %d and total_threads %d\n", gridSize, gridSize * blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaDeviceSynchronize();
double runtime = GetTimer();
fprintf(stderr, " done in %.2f s.\n", runtime / 1000);
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
double tol = 1e-6;
//printf("\nout is %f\n", sum/n);
//if (fabs(sum/n - 1.0) > tol) printf("Warning: potential numerical problems.\n");
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
5e9d0b8b5d208fd77f8b6ea4cde4e388b0b78300.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include "wb.h"
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
//@@ Load a segment of the input vector into shared memory
//@@ Traverse the reduction tree
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostInput);
free(hostOutput);
return 0;
}
| 5e9d0b8b5d208fd77f8b6ea4cde4e388b0b78300.cu | // Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include "wb.h"
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
//@@ Load a segment of the input vector into shared memory
//@@ Traverse the reduction tree
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numInputElements);
wbLog(TRACE, "The number of output elements in the input is ", numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
free(hostInput);
free(hostOutput);
return 0;
}
|
057ea036a5de5aa1be1fef752243db0c76305f92.hip | // !!! This is a file automatically generated by hipify!!!
#include <sys/time.h>
#include <stdio.h>
#include <type_traits> // is_same
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <rocblas.h>
#include "utils.h"
template <typename T, typename S>
void allocate_memory(int m, int n, int k, T **A, T **B, S **C) {
hipMallocManaged(A, m * k * sizeof(T));
hipMallocManaged(B, k * n * sizeof(T));
hipMallocManaged(C, m * n * sizeof(S));
}
template <typename T, typename S>
void free_memory(T *A, T *B, S *C) {
hipFree(A);
hipFree(B);
hipFree(C);
}
template <typename T, typename S>
bool cublas_gemm_ex(
hipblasHandle_t handle, hipblasOperation_t transA, hipblasOperation_t transB,
const int m, const int n, const int k,
T *A, T *B, S *C,
int lda, int ldb, int ldc,
const S *alpha, const S *beta, int algo)
{
hipDataType AType, BType, CType, ComputeType;
if (std::is_same<T, double>::value) {
AType = BType = CType = ComputeType = HIP_R_64F;
} else if (std::is_same<T, float>::value) {
AType = BType = CType = ComputeType = HIP_R_32F;
} else if (std::is_same<T, __half>::value) {
AType = BType = CType = ComputeType = HIP_R_16F;
} else if (std::is_same<T, int8_t>::value) {
AType = BType = HIP_R_8I;
CType = ComputeType = HIP_R_32I;
} else {
printf("Not supported data type.");
return false;
}
hipblasStatus_t status = hipblasGemmEx(handle,
transA, transB,
m, n, k,
alpha, A, AType, lda,
B, BType, ldb, beta,
C, CType, ldc, ComputeType,
static_cast<hipblasGemmAlgo_t>(algo));
return (status == HIPBLAS_STATUS_SUCCESS);
}
template <typename T, typename S>
void test_gemm(hipblasHandle_t handle,
const int m, const int n, const int k,
T *A, T *B, S *C,
const S *alpha, const S *beta, int algo, const int iteration)
{
double total_time = 0;
struct timeval start, end;
for (int i = 0; i < iteration; ++i) {
hipDeviceSynchronize();
gettimeofday(&start, NULL);
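      // A/B and m/n are passed swapped below so that the row-major product
      // C = A*B maps onto the column-major GEMM as C^T = B^T * A^T.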
bool success = cublas_gemm_ex(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n, // number of rows of matrix A and C
m, // number of columns of matrix B and C
k, // number of columns of A and rows of B
B,
A,
C,
n, // lda
k, // ldb
n, // ldc
alpha,
beta,
static_cast<hipblasGemmAlgo_t>(algo));
hipDeviceSynchronize();
gettimeofday(&end, NULL);
if (!success) break;
else if (i > 0) {
total_time += (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001;
}
}
if (total_time > 0.0) {
double avg_time = total_time / (iteration - 1);
printf("algo %d: %.3f ms\n", algo, avg_time);
performance(m, n, k, std::is_same<T, int8_t>::value, avg_time * 1e-3);
}
}
int main(int argc, char* argv[]) {
if (argc != 5) {
printf("Usage: %s <M> <N> <K> <iterations>\n", argv[0]);
printf("C = A X B (A: M * K, B: K * N, C: M * N)\n");
return 1;
}
const int m = atoi(argv[1]);
const int n = atoi(argv[2]);
const int k = atoi(argv[3]);
const int iteration = atoi(argv[4]);
printf("shape: (%d, %d) x (%d, %d)\n", m, k, k, n);
int start_algo = HIPBLAS_GEMM_DEFAULT;
int end_algo = HIPBLAS_GEMM_DEFAULT;
const double d_alpha = 1.0, d_beta = 0.0;
const float f_alpha = 1.f, f_beta = 0.f;
const __half h_alpha = __float2half_rn(1.f), h_beta = __float2half_rn(0.f);
const int32_t i_alpha = 1, i_beta = 0;
double *dA, *dB, *dC;
float *fA, *fB, *fC;
__half *hA, *hB, *hC;
int8_t *iA, *iB; int32_t *iC;
allocate_memory(m, n, k, &dA, &dB, &dC);
allocate_memory(m, n, k, &fA, &fB, &fC);
allocate_memory(m, n, k, &hA, &hB, &hC);
allocate_memory(m, n, k, &iA, &iB, &iC);
for (int i = 0; i < m * k; ++i) {
dA[i] = double(i % 255 - 127) / 127;
fA[i] = float(i % 255 - 127) / 127;
hA[i] = __float2half_rn(fA[i]);
iA[i] = float2int8(fA[i], 127);
}
for (int i = 0; i < k * n; ++i) {
dB[i] = double(i % 255 - 127) / 127;
fB[i] = float(i % 255 - 127) / 127;
hB[i] = __float2half_rn(fB[i]);
iB[i] = float2int8(fB[i], 127);
}
hipblasHandle_t handle;
hipblasCreate(&handle);
printf(">>>>>>>>>>>>>>>>> test fp64 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, dA, dB, dC, &d_alpha, &d_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> test fp32 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, fA, fB, fC, &f_alpha, &f_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> test fp16 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, hA, hB, hC, &h_alpha, &h_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> test int8 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, iA, iB, iC, &i_alpha, &i_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> compare result >>>>>>>>>>>>>>>>>\n");
printf("fp64: ");
for (int i = 0; i < 10; ++i)
printf("%.5lf%c", fC[i], " \n"[i==9]);
printf("fp32: ");
for (int i = 0; i < 10; ++i)
printf("%.5f%c", fC[i], " \n"[i==9]);
printf("fp16: ");
for (int i = 0; i < 10; ++i)
printf("%.5f%c", float(hC[i]), " \n"[i==9]);
printf("int8: ");
for (int i = 0; i < 10; ++i)
printf("%.5f%c", float(iC[i])/127/127, " \n"[i==9]);
free_memory(dA, dB, dC);
free_memory(fA, fB, fC);
free_memory(hA, hB, hC);
free_memory(iA, iB, iC);
return 0;
}
| 057ea036a5de5aa1be1fef752243db0c76305f92.cu | #include <sys/time.h>
#include <stdio.h>
#include <type_traits> // is_same
#include <cuda.h>
#include <cuda_fp16.h>
#include <cublas_v2.h>
#include "utils.h"
template <typename T, typename S>
void allocate_memory(int m, int n, int k, T **A, T **B, S **C) {
cudaMallocManaged(A, m * k * sizeof(T));
cudaMallocManaged(B, k * n * sizeof(T));
cudaMallocManaged(C, m * n * sizeof(S));
}
template <typename T, typename S>
void free_memory(T *A, T *B, S *C) {
cudaFree(A);
cudaFree(B);
cudaFree(C);
}
template <typename T, typename S>
bool cublas_gemm_ex(
cublasHandle_t handle, cublasOperation_t transA, cublasOperation_t transB,
const int m, const int n, const int k,
T *A, T *B, S *C,
int lda, int ldb, int ldc,
const S *alpha, const S *beta, int algo)
{
cudaDataType_t AType, BType, CType, ComputeType;
if (std::is_same<T, double>::value) {
AType = BType = CType = ComputeType = CUDA_R_64F;
} else if (std::is_same<T, float>::value) {
AType = BType = CType = ComputeType = CUDA_R_32F;
} else if (std::is_same<T, __half>::value) {
AType = BType = CType = ComputeType = CUDA_R_16F;
} else if (std::is_same<T, int8_t>::value) {
AType = BType = CUDA_R_8I;
CType = ComputeType = CUDA_R_32I;
} else {
printf("Not supported data type.");
return false;
}
cublasStatus_t status = cublasGemmEx(handle,
transA, transB,
m, n, k,
alpha, A, AType, lda,
B, BType, ldb, beta,
C, CType, ldc, ComputeType,
static_cast<cublasGemmAlgo_t>(algo));
return (status == CUBLAS_STATUS_SUCCESS);
}
template <typename T, typename S>
void test_gemm(cublasHandle_t handle,
const int m, const int n, const int k,
T *A, T *B, S *C,
const S *alpha, const S *beta, int algo, const int iteration)
{
double total_time = 0;
struct timeval start, end;
for (int i = 0; i < iteration; ++i) {
cudaDeviceSynchronize();
gettimeofday(&start, NULL);
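      // A/B and m/n are passed swapped below so that the row-major product
      // C = A*B maps onto the column-major GEMM as C^T = B^T * A^T.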
bool success = cublas_gemm_ex(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n, // number of rows of matrix A and C
m, // number of columns of matrix B and C
k, // number of columns of A and rows of B
B,
A,
C,
n, // lda
k, // ldb
n, // ldc
alpha,
beta,
static_cast<cublasGemmAlgo_t>(algo));
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
if (!success) break;
else if (i > 0) {
total_time += (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001;
}
}
if (total_time > 0.0) {
double avg_time = total_time / (iteration - 1);
printf("algo %d: %.3f ms\n", algo, avg_time);
performance(m, n, k, std::is_same<T, int8_t>::value, avg_time * 1e-3);
}
}
int main(int argc, char* argv[]) {
if (argc != 5) {
printf("Usage: %s <M> <N> <K> <iterations>\n", argv[0]);
printf("C = A X B (A: M * K, B: K * N, C: M * N)\n");
return 1;
}
const int m = atoi(argv[1]);
const int n = atoi(argv[2]);
const int k = atoi(argv[3]);
const int iteration = atoi(argv[4]);
printf("shape: (%d, %d) x (%d, %d)\n", m, k, k, n);
int start_algo = CUBLAS_GEMM_DEFAULT;
int end_algo = CUBLAS_GEMM_DEFAULT;
const double d_alpha = 1.0, d_beta = 0.0;
const float f_alpha = 1.f, f_beta = 0.f;
const __half h_alpha = __float2half_rn(1.f), h_beta = __float2half_rn(0.f);
const int32_t i_alpha = 1, i_beta = 0;
double *dA, *dB, *dC;
float *fA, *fB, *fC;
__half *hA, *hB, *hC;
int8_t *iA, *iB; int32_t *iC;
allocate_memory(m, n, k, &dA, &dB, &dC);
allocate_memory(m, n, k, &fA, &fB, &fC);
allocate_memory(m, n, k, &hA, &hB, &hC);
allocate_memory(m, n, k, &iA, &iB, &iC);
for (int i = 0; i < m * k; ++i) {
dA[i] = double(i % 255 - 127) / 127;
fA[i] = float(i % 255 - 127) / 127;
hA[i] = __float2half_rn(fA[i]);
iA[i] = float2int8(fA[i], 127);
}
for (int i = 0; i < k * n; ++i) {
dB[i] = double(i % 255 - 127) / 127;
fB[i] = float(i % 255 - 127) / 127;
hB[i] = __float2half_rn(fB[i]);
iB[i] = float2int8(fB[i], 127);
}
cublasHandle_t handle;
cublasCreate(&handle);
printf(">>>>>>>>>>>>>>>>> test fp64 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, dA, dB, dC, &d_alpha, &d_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> test fp32 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, fA, fB, fC, &f_alpha, &f_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> test fp16 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, hA, hB, hC, &h_alpha, &h_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> test int8 >>>>>>>>>>>>>>>>>\n");
for (int algo = start_algo; algo <= end_algo; ++algo)
test_gemm(handle, m, n, k, iA, iB, iC, &i_alpha, &i_beta, algo, iteration);
printf(">>>>>>>>>>>>>>>>> compare result >>>>>>>>>>>>>>>>>\n");
printf("fp64: ");
for (int i = 0; i < 10; ++i)
printf("%.5lf%c", fC[i], " \n"[i==9]);
printf("fp32: ");
for (int i = 0; i < 10; ++i)
printf("%.5f%c", fC[i], " \n"[i==9]);
printf("fp16: ");
for (int i = 0; i < 10; ++i)
printf("%.5f%c", float(hC[i]), " \n"[i==9]);
printf("int8: ");
for (int i = 0; i < 10; ++i)
printf("%.5f%c", float(iC[i])/127/127, " \n"[i==9]);
free_memory(dA, dB, dC);
free_memory(fA, fB, fC);
free_memory(hA, hB, hC);
free_memory(iA, iB, iC);
return 0;
}
|
f3305cdb1a4ff86041bb3cdaf4778dbaa15babb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
__global__ void helloFromGpu(void)
{
//printf("hello world from GPU!\n");
printf("hello world from GPU!\n");
}
int main(void)
{
printf("Hello World from CPU!\n");
hipLaunchKernelGGL(( helloFromGpu), dim3(1),dim3(10), 0, 0, );
hipDeviceReset();
return 0;
} | f3305cdb1a4ff86041bb3cdaf4778dbaa15babb9.cu | #include <cstdio>
__global__ void helloFromGpu(void)
{
//printf("hello world from GPU!\n");
printf("hello world from GPU!\n");
}
int main(void)
{
printf("Hello World from CPU!\n");
helloFromGpu<<<1,10>>>();
cudaDeviceReset();
return 0;
} |
3c0319bfcfef41584da6fad4ab836a4a5e3bda22.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearColumn<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
| 3c0319bfcfef41584da6fad4ab836a4a5e3bda22.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearColumn<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
|
b6a6b530bb2f1f19e7f350050cce9a88bf35e72e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.h"
/*
With a very large number of blocks, the warp-size trick (the magic number 32) works.
Making the threads in a warp run the same instructions (take the same if-else
branch) accelerates performance.
e.g. for a simple assignment operation, the warp-divergent version takes 0.12 ms,
while the optimized one takes only 0.05 ms, roughly a 2x speedup.
But with a small number of blocks, the optimized version is slower.
*/
__global__ void kernel0(float *c) {
int idx = threadIdx.x + blockDim.x * blockIdx.x; // 1-d block
float a = 0., b = 0.;
if (idx % 2 == 0) {
a = 100.;
} else {
b = 100;
}
c[idx] = a + b;
}
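// kernel1 below avoids warp divergence: idx / kWarpSize is identical for every
// thread of a warp (when blockDim.x is a multiple of the warp size), so all
// threads of a warp take the same branch.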
__global__ void kernel1(float *c) {
int idx = threadIdx.x + blockDim.x * blockIdx.x; // 1-d block
float a = 0., b = 0.;
if (idx / kWarpSize % 2 == 0) {
a = 100.;
} else {
b = 100;
}
c[idx] = a + b;
}
int main() {
int dim = 12800;
float *dC;
NV_CHECK(hipMalloc(&dC, dim * sizeof(float)));
NV_CHECK(hipMemset(dC, 0, dim * sizeof(float)));
// warm up
hipLaunchKernelGGL(( kernel0), dim3(dim), dim3(1), 0, 0, dC);
hipLaunchKernelGGL(( kernel1), dim3(dim), dim3(1), 0, 0, dC);
Timer timer;
hipLaunchKernelGGL(( kernel0), dim3(dim), dim3(1), 0, 0, dC);
LOG(INFO) << "kernel1: " << timer.peek();
timer.peek();
hipLaunchKernelGGL(( kernel1), dim3(dim), dim3(1), 0, 0, dC);
LOG(INFO) << "kernel2: " << timer.peek();
return 0;
}
| b6a6b530bb2f1f19e7f350050cce9a88bf35e72e.cu | #include "helper.h"
/*
With a very large number of blocks, the warp-size trick (the magic number 32) works.
Making the threads in a warp run the same instructions (take the same if-else
branch) accelerates performance.
e.g. for a simple assignment operation, the warp-divergent version takes 0.12 ms,
while the optimized one takes only 0.05 ms, roughly a 2x speedup.
But with a small number of blocks, the optimized version is slower.
*/
__global__ void kernel0(float *c) {
int idx = threadIdx.x + blockDim.x * blockIdx.x; // 1-d block
float a = 0., b = 0.;
if (idx % 2 == 0) {
a = 100.;
} else {
b = 100;
}
c[idx] = a + b;
}
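// kernel1 below avoids warp divergence: idx / kWarpSize is identical for every
// thread of a warp (when blockDim.x is a multiple of the warp size), so all
// threads of a warp take the same branch.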
__global__ void kernel1(float *c) {
int idx = threadIdx.x + blockDim.x * blockIdx.x; // 1-d block
float a = 0., b = 0.;
if (idx / kWarpSize % 2 == 0) {
a = 100.;
} else {
b = 100;
}
c[idx] = a + b;
}
int main() {
int dim = 12800;
float *dC;
NV_CHECK(cudaMalloc(&dC, dim * sizeof(float)));
NV_CHECK(cudaMemset(dC, 0, dim * sizeof(float)));
// warm up
kernel0<<<dim, 1>>>(dC);
kernel1<<<dim, 1>>>(dC);
Timer timer;
kernel0<<<dim, 1>>>(dC);
LOG(INFO) << "kernel1: " << timer.peek();
timer.peek();
kernel1<<<dim, 1>>>(dC);
LOG(INFO) << "kernel2: " << timer.peek();
return 0;
}
|
ed62950d362996b44ac97bd9c75402af543b5baf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
__global__ void block(float *a, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=N) return;//Without it, num of threads is 2048
a[i] = i;
}
int main(void) {
const int N = 2000;
const int M = 1024;
float *a;
hipMallocManaged(&a, N*sizeof(float));
hipLaunchKernelGGL(( block), dim3((N+M-1)/M),dim3(M), 0, 0, a,N);
hipDeviceSynchronize();
for (int i=0; i<N; i++)
printf("%d %g\n",i,a[i]);
hipFree(a);
} | ed62950d362996b44ac97bd9c75402af543b5baf.cu | #include <cstdio>
__global__ void block(float *a, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i>=N) return;//Without it, num of threads is 2048
a[i] = i;
}
int main(void) {
const int N = 2000;
const int M = 1024;
float *a;
cudaMallocManaged(&a, N*sizeof(float));
block<<<(N+M-1)/M,M>>>(a,N);
cudaDeviceSynchronize();
for (int i=0; i<N; i++)
printf("%d %g\n",i,a[i]);
cudaFree(a);
} |
e8c27a062f01b0fac82ec5f06ea7607f98b6e78e.hip | // !!! This is a file automatically generated by hipify!!!
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -std=c++11 -fcuda-is-device -triple nvptx64-nvidia-cuda -emit-llvm -o - %s | FileCheck --check-prefix=DEVICE %s
// RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -std=c++11 -triple x86_64-unknown-linux-gnu -target-sdk-version=8.0 -fcuda-include-gpubinary %t -emit-llvm -o - %s | FileCheck --check-prefix=HOST %s
struct hipSurfaceReference {
int desc;
};
template <typename T, int dim = 1>
struct __attribute__((device_builtin_surface_type)) surface : public hipSurfaceReference {
};
// Partial specialization over `void`.
template<int dim>
struct __attribute__((device_builtin_surface_type)) surface<void, dim> : public hipSurfaceReference {
};
// On the device side, surface references are represented as `i64` handles.
// DEVICE: @surf ={{.*}} addrspace(1) externally_initialized global i64 undef, align 4
// On the host side, they remain in the original type.
// HOST: @surf = internal global %struct.surface
// HOST: @0 = private unnamed_addr constant [5 x i8] c"surf\00"
surface<void, 2> surf;
__attribute__((device)) int suld_2d_zero(surface<void, 2>, int, int) asm("llvm.nvvm.suld.2d.i32.zero");
// DEVICE-LABEL: i32 @_Z3fooii(i32 noundef %x, i32 noundef %y)
// DEVICE: call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @surf)
// DEVICE: call noundef i32 @llvm.nvvm.suld.2d.i32.zero(i64 %{{.*}}, i32 noundef %{{.*}}, i32 noundef %{{.*}})
__attribute__((device)) int foo(int x, int y) {
return suld_2d_zero(surf, x, y);
}
// HOST: define internal void @[[PREFIX:__cuda]]_register_globals
// Surface references need registering with correct arguments.
// HOST: call void @[[PREFIX]]RegisterSurface(i8** %0, i8*{{.*}}({{.*}}@surf{{.*}}), i8*{{.*}}({{.*}}@0{{.*}}), i8*{{.*}}({{.*}}@0{{.*}}), i32 2, i32 0)
// They also need annotating in metadata.
// DEVICE: !0 = !{i64 addrspace(1)* @surf, !"surface", i32 1}
| e8c27a062f01b0fac82ec5f06ea7607f98b6e78e.cu | // REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -std=c++11 -fcuda-is-device -triple nvptx64-nvidia-cuda -emit-llvm -o - %s | FileCheck --check-prefix=DEVICE %s
// RUN: echo "GPU binary would be here" > %t
// RUN: %clang_cc1 -std=c++11 -triple x86_64-unknown-linux-gnu -target-sdk-version=8.0 -fcuda-include-gpubinary %t -emit-llvm -o - %s | FileCheck --check-prefix=HOST %s
struct surfaceReference {
int desc;
};
template <typename T, int dim = 1>
struct __attribute__((device_builtin_surface_type)) surface : public surfaceReference {
};
// Partial specialization over `void`.
template<int dim>
struct __attribute__((device_builtin_surface_type)) surface<void, dim> : public surfaceReference {
};
// On the device side, surface references are represented as `i64` handles.
// DEVICE: @surf ={{.*}} addrspace(1) externally_initialized global i64 undef, align 4
// On the host side, they remain in the original type.
// HOST: @surf = internal global %struct.surface
// HOST: @0 = private unnamed_addr constant [5 x i8] c"surf\00"
surface<void, 2> surf;
__attribute__((device)) int suld_2d_zero(surface<void, 2>, int, int) asm("llvm.nvvm.suld.2d.i32.zero");
// DEVICE-LABEL: i32 @_Z3fooii(i32 noundef %x, i32 noundef %y)
// DEVICE: call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @surf)
// DEVICE: call noundef i32 @llvm.nvvm.suld.2d.i32.zero(i64 %{{.*}}, i32 noundef %{{.*}}, i32 noundef %{{.*}})
__attribute__((device)) int foo(int x, int y) {
return suld_2d_zero(surf, x, y);
}
// HOST: define internal void @[[PREFIX:__cuda]]_register_globals
// Surface references need registering with correct arguments.
// HOST: call void @[[PREFIX]]RegisterSurface(i8** %0, i8*{{.*}}({{.*}}@surf{{.*}}), i8*{{.*}}({{.*}}@0{{.*}}), i8*{{.*}}({{.*}}@0{{.*}}), i32 2, i32 0)
// They also need annotating in metadata.
// DEVICE: !0 = !{i64 addrspace(1)* @surf, !"surface", i32 1}
|
38d727a222e6b6462b37f4da184785e8a9d9d5c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuhelpers.h"
#include "isub.h"
#include <cassert>
/// dst = src[idxs, 0:X] where J = len(indxs)
__global__ void isub(float *dst, const float *src, const int *idxs, int J, int X) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= J) return;
dst += j * X;
src += idxs[j] * X;
for (size_t x = 0; x < X; ++x) dst[x] = src[x];
}
/// main 2D isub function
void d_isub(float *dst, const float *src, const int *idxs, int J, int X, bool _sync) {
assert(dst != src);
dim3 thrds(NIMPA_CU_THREADS, 1, 1);
dim3 blcks((J + NIMPA_CU_THREADS - 1) / NIMPA_CU_THREADS, 1, 1);
hipLaunchKernelGGL(( isub), dim3(blcks), dim3(thrds), 0, 0, dst, src, idxs, J, X);
HANDLE_ERROR(hipGetLastError());
if (_sync) HANDLE_ERROR(hipDeviceSynchronize()); // unified memcpy device2host
}
| 38d727a222e6b6462b37f4da184785e8a9d9d5c9.cu | #include "cuhelpers.h"
#include "isub.h"
#include <cassert>
/// dst = src[idxs, 0:X] where J = len(indxs)
__global__ void isub(float *dst, const float *src, const int *idxs, int J, int X) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= J) return;
dst += j * X;
src += idxs[j] * X;
for (size_t x = 0; x < X; ++x) dst[x] = src[x];
}
/// main 2D isub function
void d_isub(float *dst, const float *src, const int *idxs, int J, int X, bool _sync) {
assert(dst != src);
dim3 thrds(NIMPA_CU_THREADS, 1, 1);
dim3 blcks((J + NIMPA_CU_THREADS - 1) / NIMPA_CU_THREADS, 1, 1);
isub<<<blcks, thrds>>>(dst, src, idxs, J, X);
HANDLE_ERROR(cudaGetLastError());
if (_sync) HANDLE_ERROR(cudaDeviceSynchronize()); // unified memcpy device2host
}
|
a83da5aa077d5e2b243a8926d72c7a8a48b492c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init_rand.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *states = NULL;
hipMalloc(&states, XSIZE*YSIZE*sizeof(hiprandState_t));
float *randArr = NULL;
hipMalloc(&randArr, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((init_rand), dim3(gridBlock), dim3(threadBlock), 0, 0, states, randArr);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((init_rand), dim3(gridBlock), dim3(threadBlock), 0, 0, states, randArr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((init_rand), dim3(gridBlock), dim3(threadBlock), 0, 0, states, randArr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a83da5aa077d5e2b243a8926d72c7a8a48b492c0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init_rand.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState *states = NULL;
cudaMalloc(&states, XSIZE*YSIZE*sizeof(curandState));
float *randArr = NULL;
cudaMalloc(&randArr, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
init_rand<<<gridBlock,threadBlock>>>(states,randArr);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
init_rand<<<gridBlock,threadBlock>>>(states,randArr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
init_rand<<<gridBlock,threadBlock>>>(states,randArr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0c5ea097074a341205b65782e4b68b9d26dd574e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialGridSamplerBilinear.cu"
#else
static inline void THNN_(SpatialGridSamplerBilinear_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *gradOutput) {
THCUNN_argCheck(state, THCTensor_(nDimension)(state, input) == 4, 2, input,
"4D input tensor expected but got: %s");
THCUNN_argCheck(state, THCTensor_(nDimension)(state, grid) == 4, 2, grid,
"4D grid tensor expected but got: %s");
int64_t nbatch = THCTensor_(size)(state, input, 0);
int64_t channels = THCTensor_(size)(state, input, 1);
int64_t iheight = THCTensor_(size)(state, input, 2);
int64_t iwidth = THCTensor_(size)(state, input, 3);
int64_t oheight = THCTensor_(size)(state, grid, 1);
int64_t owidth = THCTensor_(size)(state, grid, 2);
THCUNN_check_dim_size(state, grid, 4, 0, nbatch);
THCUNN_check_dim_size(state, grid, 4, 3, 2);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 4, 0, nbatch);
THCUNN_check_dim_size(state, gradOutput, 4, 1, channels);
THCUNN_check_dim_size(state, gradOutput, 4, 2, oheight);
THCUNN_check_dim_size(state, gradOutput, 4, 3, owidth);
}
}
TH_API void THNN_(SpatialGridSamplerBilinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *output,
int padding_mode) {
THCUNN_assertSameGPU(state, 3, input, grid, output);
THNN_(SpatialGridSamplerBilinear_shapeCheck)(state, input, grid, NULL);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state,grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
// resize output to the same shape as input
THCTensor_(resize4d)(state, output, N, C, H, W);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devOutput = toDeviceTensor<real, 4>(state, output);
int count = static_cast<int>(N*H*W);
hipLaunchKernelGGL(( SpatialGridSamplerBilinear_updateOutput_kernel)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
count, devInput, devGrid, devOutput, padding_mode);
THCudaCheck(hipGetLastError());
}
TH_API void THNN_(SpatialGridSamplerBilinear_updateGradInput)(
THCState *state,
THCTensor *input, THCTensor *gradInput,
THCTensor *grid, THCTensor *gradGrid,
THCTensor *gradOutput,
int padding_mode) {
THCUNN_assertSameGPU(state, 5, input, gradInput, grid, gradGrid, gradOutput);
THNN_(SpatialGridSamplerBilinear_shapeCheck)(state, input, grid, gradOutput);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state, grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
THCTensor_(resize4d)(state, gradInput, N, C, IH, IW);
THCTensor_(resize4d)(state, gradGrid, N, H, W, 2);
THCTensor_(zero)(state, gradInput);
THCTensor_(zero)(state, gradGrid);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGradInput = toDeviceTensor<real, 4>(state, gradInput);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devGradGrid = toDeviceTensor<real, 4>(state, gradGrid);
THCDeviceTensor<real, 4> devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
int count = static_cast<int>(N*H*W);
hipLaunchKernelGGL(( SpatialGridSamplerBilinear_updateGradInput_kernel)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
count, devInput, devGradInput, devGrid, devGradGrid, devGradOutput, padding_mode);
THCudaCheck(hipGetLastError());
}
#endif
| 0c5ea097074a341205b65782e4b68b9d26dd574e.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialGridSamplerBilinear.cu"
#else
static inline void THNN_(SpatialGridSamplerBilinear_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *gradOutput) {
THCUNN_argCheck(state, THCTensor_(nDimension)(state, input) == 4, 2, input,
"4D input tensor expected but got: %s");
THCUNN_argCheck(state, THCTensor_(nDimension)(state, grid) == 4, 2, grid,
"4D grid tensor expected but got: %s");
int64_t nbatch = THCTensor_(size)(state, input, 0);
int64_t channels = THCTensor_(size)(state, input, 1);
int64_t iheight = THCTensor_(size)(state, input, 2);
int64_t iwidth = THCTensor_(size)(state, input, 3);
int64_t oheight = THCTensor_(size)(state, grid, 1);
int64_t owidth = THCTensor_(size)(state, grid, 2);
THCUNN_check_dim_size(state, grid, 4, 0, nbatch);
THCUNN_check_dim_size(state, grid, 4, 3, 2);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 4, 0, nbatch);
THCUNN_check_dim_size(state, gradOutput, 4, 1, channels);
THCUNN_check_dim_size(state, gradOutput, 4, 2, oheight);
THCUNN_check_dim_size(state, gradOutput, 4, 3, owidth);
}
}
TH_API void THNN_(SpatialGridSamplerBilinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *output,
int padding_mode) {
THCUNN_assertSameGPU(state, 3, input, grid, output);
THNN_(SpatialGridSamplerBilinear_shapeCheck)(state, input, grid, NULL);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state,grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
// resize output to the same shape as input
THCTensor_(resize4d)(state, output, N, C, H, W);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devOutput = toDeviceTensor<real, 4>(state, output);
int count = static_cast<int>(N*H*W);
SpatialGridSamplerBilinear_updateOutput_kernel
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
count, devInput, devGrid, devOutput, padding_mode);
THCudaCheck(cudaGetLastError());
}
TH_API void THNN_(SpatialGridSamplerBilinear_updateGradInput)(
THCState *state,
THCTensor *input, THCTensor *gradInput,
THCTensor *grid, THCTensor *gradGrid,
THCTensor *gradOutput,
int padding_mode) {
THCUNN_assertSameGPU(state, 5, input, gradInput, grid, gradGrid, gradOutput);
THNN_(SpatialGridSamplerBilinear_shapeCheck)(state, input, grid, gradOutput);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state, grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
THCTensor_(resize4d)(state, gradInput, N, C, IH, IW);
THCTensor_(resize4d)(state, gradGrid, N, H, W, 2);
THCTensor_(zero)(state, gradInput);
THCTensor_(zero)(state, gradGrid);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGradInput = toDeviceTensor<real, 4>(state, gradInput);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devGradGrid = toDeviceTensor<real, 4>(state, gradGrid);
THCDeviceTensor<real, 4> devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
int count = static_cast<int>(N*H*W);
SpatialGridSamplerBilinear_updateGradInput_kernel
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
count, devInput, devGradInput, devGrid, devGradGrid, devGradOutput, padding_mode);
THCudaCheck(cudaGetLastError());
}
#endif
|
3580a5265feae2fa723261123543aa196c8980cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ac_kernel1 ( int *d_state_transition, unsigned int *d_state_supply, unsigned int *d_state_final, unsigned char *d_text, unsigned int *d_out, size_t pitch, int m, int n, int p_size, int alphabet, int numBlocks ) {
//int idx = blockIdx.x * blockDim.x + threadIdx.x;
int effective_pitch = pitch / sizeof ( int );
int charactersPerBlock = n / numBlocks;
int startBlock = blockIdx.x * charactersPerBlock;
int stopBlock = startBlock + charactersPerBlock;
int charactersPerThread = ( stopBlock - startBlock ) / blockDim.x;
int startThread = startBlock + charactersPerThread * threadIdx.x;
int stopThread;
if( blockIdx.x == numBlocks -1 && threadIdx.x==blockDim.x-1)
stopThread = n - 1;
else stopThread = startThread + charactersPerThread + m-1;
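 // Each thread scans m-1 extra characters past its chunk so matches that
 // straddle a chunk boundary are not missed (m is presumably the pattern length).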
int r = 0, s;
int column;
//cuPrintf("Working from %i to %i chars %i\n", startThread, stopThread, charactersPerThread);
for ( column = startThread; ( column < stopThread && column < n ); column++ ) {
while ( ( s = d_state_transition[r * effective_pitch + (d_text[column]-(unsigned char)'A')] ) == -1 )
r = d_state_supply[r];
r = s;
d_out[column] = d_state_final[r];
}
} | 3580a5265feae2fa723261123543aa196c8980cc.cu | #include "includes.h"
__global__ void ac_kernel1 ( int *d_state_transition, unsigned int *d_state_supply, unsigned int *d_state_final, unsigned char *d_text, unsigned int *d_out, size_t pitch, int m, int n, int p_size, int alphabet, int numBlocks ) {
//int idx = blockIdx.x * blockDim.x + threadIdx.x;
int effective_pitch = pitch / sizeof ( int );
int charactersPerBlock = n / numBlocks;
int startBlock = blockIdx.x * charactersPerBlock;
int stopBlock = startBlock + charactersPerBlock;
int charactersPerThread = ( stopBlock - startBlock ) / blockDim.x;
int startThread = startBlock + charactersPerThread * threadIdx.x;
int stopThread;
if( blockIdx.x == numBlocks -1 && threadIdx.x==blockDim.x-1)
stopThread = n - 1;
else stopThread = startThread + charactersPerThread + m-1;
int r = 0, s;
int column;
//cuPrintf("Working from %i to %i chars %i\n", startThread, stopThread, charactersPerThread);
for ( column = startThread; ( column < stopThread && column < n ); column++ ) {
while ( ( s = d_state_transition[r * effective_pitch + (d_text[column]-(unsigned char)'A')] ) == -1 )
r = d_state_supply[r];
r = s;
d_out[column] = d_state_final[r];
}
} |
b10079124e2dd22cae64facbbb272b9d28717e03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <ATen/native/hip/block_reduce.cuh>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
namespace at { namespace native {
namespace {
int64_t div_up(int64_t a, int64_t b) {
return (a + (b - 1)) / b;
}
template <typename T>
inline __device__ bool _isinf(T x) { return ::isinf(x); }
inline __device__ bool _isinf(c10::Half x) {
return ::isinf(static_cast<float>(x));
}
inline __device__ bool _isinf(c10::BFloat16 x) {
return ::isinf(static_cast<float>(x));
}
#define MAX_NUM_BLOCKS 64
// Normalizes the L1 norm of every row to 1; used by multinomial
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
extern __shared__ unsigned char my_smem[];
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
scalar_t zero = static_cast<scalar_t>(0);
scalar_t val;
for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
scalar_t sum = static_cast<scalar_t>(0);
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
val = dist[row * cols + col];
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
sum = sum + val;
}
sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
smem[0] = sum;
}
__syncthreads();
sum = smem[0];
if (sum > zero) {
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
dist[row * cols + col] = dist[row * cols + col] / sum;
}
}
}
}
void renormRows(Tensor& t) {
TORCH_CHECK(t.dim() == 2);
int64_t rows = t.size(0);
int64_t cols = t.size(1);
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
const int64_t maxThreads = ::min(
props->maxThreadsPerBlock, cuda_utils::kCUDABlockReduceMaxThreads);
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(::min(maxThreads, C10_WARP_SIZE * div_up(cols, C10_WARP_SIZE)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
hipLaunchKernelGGL(( renormRowsL1<scalar_t>)
, dim3(grid), dim3(block), (block.x / C10_WARP_SIZE) * sizeof(scalar_t),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), t.data_ptr<scalar_t>(),
rows, cols);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
scalar_t* dist,
int size,
scalar_t val) {
int start = 0;
int end = size;
// cumdist[size - 1] = 0 => all zero prob dist
CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));
while (end - start > 0) {
int mid = start + (end - start) / 2;
scalar_t midVal = cumdist[mid];
if (midVal < val) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == size) {
// No probability mass or precision problems; just return the
// first non-zero element by setting start to size-1 here,
// the code below will move it to the last non-zero probability
// this actually can happen when the random number is 1
// (github pytorch issue #4858).
start = size - 1;
}
while(start >= 1 && dist[start] == 0) start--;
return start;
}
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(PhiloxCudaState philox_args,
int totalSamples,
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* normDistPrefixSum,
scalar_t* normDist) {
// At the moment, each warp computes one sample value in the binary
// search due to divergence. It seems possible to compute multiple
// values and limit divergence though later on.
auto seeds = at::cuda::philox::unpack(philox_args);
// global index formula for 2D grid of 1D blocks
int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
// The block determines the distribution for which we generate a point
for (int64_t curDist = blockIdx.y;
curDist < distributions;
curDist += gridDim.y) {
for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
sample < totalSamples; sample += blockDim.x*gridDim.x) {
//we are losing 3 out of 4 generated numbers but it's ok
//this kernel is not very efficient anyway
auto rand = hiprand_uniform4(&state);
scalar_t r = static_cast<scalar_t>(rand.x);
// Find the bucket that a uniform sample lies in
int choice = binarySearchForMultinomial<scalar_t>(
normDistPrefixSum + curDist * categories,
normDist + curDist * categories,
categories,
r);
dest[curDist * totalSamples + sample] = choice;
}
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void sampleMultinomialOnce(
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* sampled,
scalar_t* dist,
int stride_dist, // dist->stride(0)
int stride_categories // dist->stride(1)
) {
extern __shared__ unsigned char my_smem[];
__shared__ bool found;
__shared__ unsigned foundPos;
accscalar_t *smem = reinterpret_cast<accscalar_t *>(my_smem);
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int64_t curDist = blockIdx.x;
curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
CUDA_KERNEL_ASSERT(!at::_isnan(val));
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(!(val < zero));
sum = sum + static_cast<accscalar_t>(val);
}
    // after this block reduction, threadIdx.x == 0 holds the block-wide sum
sum = cuda_utils::BlockReduceSum(sum, smem);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(sum > accZero);
foundPos = 0;
smem[0] = sum;
smem[1] = sampled[curDist];
}
__syncthreads();
sum = smem[0];
scalar_t sample = static_cast<scalar_t>(smem[1]);
__syncthreads();
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
accscalar_t prevHighProb = accZero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
accscalar_t val = accZero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its
// bucket
scalar_t curBucket =
static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
scalar_t prevBucket = static_cast<scalar_t>(
threadIdx.x == 0 ? prevHighProb
: smem[threadIdx.x - 1] + prevHighProb);
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
// We're done; we have the sample
// Torch indices are 1-based
atomicMax(&foundPos, cat);
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0) {
if (found) {
dest[curDist] = foundPos;
} else {
        // This should address a rare bug where we don't select a valid index. This likely occurs when,
        // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1
        // and our uniform sample is greater than this value. In this case we likely have uninitialized memory
        // in dest[curDist]. So basically we will loop through the distribution and pick the largest index
        // where the distribution is non-zero. This is obviously terribly inefficient, but due to the
        // rarity with which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
}
void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
int inputSize = self.dim();
int64_t numDist =
inputSize == 1 ? 1 : self.size(0);
int numCategories =
inputSize == 1 ? self.size(0) : self.size(1);
// Restructure data for 2d
auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;
result.resize_({numDist, n_sample});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredWarps = at::cuda::ATenCeilDiv(numCategories, C10_WARP_SIZE);
int requiredThreads = ::min(maxThreads, requiredWarps * C10_WARP_SIZE);
int requiredShared = requiredThreads * sizeof(accscalar_t);
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()),
self_v.options().layout_opt(), self_v.options().device_opt(),
self_v.options().pinned_memory_opt());
at::native::uniform_(sampled, 0.0, 1.0, generator);
dim3 block(requiredThreads);
dim3 grid(::min(static_cast<int>(numDist), numSM * 4));
hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accscalar_t>)
, dim3(grid), dim3(block),
requiredShared,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<int64_t>(),
numDist,
numCategories,
sampled.data_ptr<scalar_t>(),
self_v.data_ptr<scalar_t>(),
self_v.stride(0),
self_v.stride(1)
);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
Tensor origDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
origDist.copy_(self_v);
Tensor normDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor prefixSum = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// Renorm along rows
normDist.copy_(origDist);
renormRows(normDist);
// Prefix sum along rows
at::cuda::cumsum_out(prefixSum, normDist, 1);
PhiloxCudaState rng_engine_inputs;
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(128);
// Each block will generate a sample from one
// distribution concurrently.
int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
dim3 grid((n_sample-1)/block.x+1, grid_y);
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
// hiprand_uniform4 (See Note [Register spilling in hiprand call for CUDA < 10]),
// offset is 4 times that.
auto offset = ((numDist-1)/grid.y+1)*4;
rng_engine_inputs = gen->philox_cuda_state(offset);
}
// Sample with replacement
hipLaunchKernelGGL(( sampleMultinomialWithReplacement)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
rng_engine_inputs,
n_sample,
result.data_ptr<int64_t>(),
numDist, numCategories,
prefixSum.data_ptr<scalar_t>(),
normDist.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
if (inputSize == 1) {
result.resize_({n_sample});
}
}
}
REGISTER_DISPATCH(
multinomial_with_replacement_stub,
&multinomial_with_replacement_kernel_impl);
}}
| b10079124e2dd22cae64facbbb272b9d28717e03.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/CUDAFunctions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/LaunchUtils.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
namespace at { namespace native {
namespace {
int64_t div_up(int64_t a, int64_t b) {
return (a + (b - 1)) / b;
}
template <typename T>
inline __device__ bool _isinf(T x) { return ::isinf(x); }
inline __device__ bool _isinf(c10::Half x) {
return ::isinf(static_cast<float>(x));
}
inline __device__ bool _isinf(c10::BFloat16 x) {
return ::isinf(static_cast<float>(x));
}
#define MAX_NUM_BLOCKS 200
// Normalizes the L1 norm of every row to 1; used by multinomial
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void renormRowsL1(scalar_t* dist, long rows, long cols) {
extern __shared__ unsigned char my_smem[];
scalar_t *smem = reinterpret_cast<scalar_t *>(my_smem);
scalar_t zero = static_cast<scalar_t>(0);
scalar_t val;
for (int64_t row = blockIdx.x; row < rows; row += gridDim.x) {
scalar_t sum = static_cast<scalar_t>(0);
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
val = dist[row * cols + col];
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
sum = sum + val;
}
sum = cuda_utils::BlockReduceSum(sum, smem);
if (threadIdx.x == 0) {
CUDA_KERNEL_ASSERT(!(val < zero)); // ! < 0 for NaN handling
smem[0] = sum;
}
__syncthreads();
sum = smem[0];
if (sum > zero) {
for (int64_t col = threadIdx.x; col < cols; col += blockDim.x) {
dist[row * cols + col] = dist[row * cols + col] / sum;
}
}
}
}
void renormRows(Tensor& t) {
TORCH_CHECK(t.dim() == 2);
int64_t rows = t.size(0);
int64_t cols = t.size(1);
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
const int64_t maxThreads = std::min(
props->maxThreadsPerBlock, cuda_utils::kCUDABlockReduceMaxThreads);
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(std::min(maxThreads, C10_WARP_SIZE * div_up(cols, C10_WARP_SIZE)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(t.scalar_type(), "renormRows_cuda", [&] {
renormRowsL1<scalar_t>
<<<grid, block, (block.x / C10_WARP_SIZE) * sizeof(scalar_t),
at::cuda::getCurrentCUDAStream()>>>(t.data_ptr<scalar_t>(),
rows, cols);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
template <typename scalar_t>
__device__ int binarySearchForMultinomial(scalar_t* cumdist,
scalar_t* dist,
int size,
scalar_t val) {
int start = 0;
int end = size;
// cumdist[size - 1] = 0 => all zero prob dist
CUDA_KERNEL_ASSERT(cumdist[size - 1] > static_cast<scalar_t>(0));
while (end - start > 0) {
int mid = start + (end - start) / 2;
scalar_t midVal = cumdist[mid];
if (midVal < val) {
start = mid + 1;
} else {
end = mid;
}
}
if (start == size) {
// No probability mass or precision problems; just return the
// first non-zero element by setting start to size-1 here,
// the code below will move it to the last non-zero probability
// this actually can happen when the random number is 1
// (github pytorch issue #4858).
start = size - 1;
}
while(start >= 1 && dist[start] == 0) start--;
return start;
}
template <typename scalar_t>
__global__ void
sampleMultinomialWithReplacement(PhiloxCudaState philox_args,
int totalSamples,
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* normDistPrefixSum,
scalar_t* normDist) {
// At the moment, each warp computes one sample value in the binary
// search due to divergence. It seems possible to compute multiple
// values and limit divergence though later on.
auto seeds = at::cuda::philox::unpack(philox_args);
// global index formula for 2D grid of 1D blocks
int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds),
idx,
std::get<1>(seeds),
&state);
// The block determines the distribution for which we generate a point
for (int64_t curDist = blockIdx.y;
curDist < distributions;
curDist += gridDim.y) {
for (int sample = blockIdx.x*blockDim.x + threadIdx.x;
sample < totalSamples; sample += blockDim.x*gridDim.x) {
//we are losing 3 out of 4 generated numbers but it's ok
//this kernel is not very efficient anyway
auto rand = curand_uniform4(&state);
scalar_t r = static_cast<scalar_t>(rand.x);
// Find the bucket that a uniform sample lies in
int choice = binarySearchForMultinomial<scalar_t>(
normDistPrefixSum + curDist * categories,
normDist + curDist * categories,
categories,
r);
dest[curDist * totalSamples + sample] = choice;
}
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void sampleMultinomialOnce(
int64_t* dest,
int64_t distributions,
int categories,
scalar_t* sampled,
scalar_t* dist,
int stride_dist, // dist->stride(0)
int stride_categories // dist->stride(1)
) {
extern __shared__ unsigned char my_smem[];
__shared__ bool found;
__shared__ unsigned foundPos;
accscalar_t *smem = reinterpret_cast<accscalar_t *>(my_smem);
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int64_t curDist = blockIdx.x;
curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
CUDA_KERNEL_ASSERT(!at::_isnan(val));
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(!(val < zero));
sum = sum + static_cast<accscalar_t>(val);
}
    // after this block reduction, threadIdx.x == 0 holds the block-wide sum
sum = cuda_utils::BlockReduceSum(sum, smem);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
CUDA_KERNEL_ASSERT(!_isinf(val));
CUDA_KERNEL_ASSERT(sum > accZero);
foundPos = 0;
smem[0] = sum;
smem[1] = sampled[curDist];
}
__syncthreads();
sum = smem[0];
scalar_t sample = static_cast<scalar_t>(smem[1]);
__syncthreads();
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
accscalar_t prevHighProb = accZero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
accscalar_t val = accZero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its
// bucket
scalar_t curBucket =
static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
scalar_t prevBucket = static_cast<scalar_t>(
threadIdx.x == 0 ? prevHighProb
: smem[threadIdx.x - 1] + prevHighProb);
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
// We're done; we have the sample
// Torch indices are 1-based
atomicMax(&foundPos, cat);
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0) {
if (found) {
dest[curDist] = foundPos;
} else {
        // This should address a rare bug where we don't select a valid index. This likely occurs when,
        // due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1
        // and our uniform sample is greater than this value. In this case we likely have uninitialized memory
        // in dest[curDist]. So basically we will loop through the distribution and pick the largest index
        // where the distribution is non-zero. This is obviously terribly inefficient, but due to the
        // rarity with which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
}
void multinomial_with_replacement_kernel_impl(
Tensor& result,
const Tensor& self,
const int64_t n_sample,
c10::optional<Generator> generator) {
auto gen = get_generator_or_default<CUDAGeneratorImpl>(generator, cuda::detail::getDefaultCUDAGenerator());
int inputSize = self.dim();
int64_t numDist =
inputSize == 1 ? 1 : self.size(0);
int numCategories =
inputSize == 1 ? self.size(0) : self.size(1);
// Restructure data for 2d
auto self_v = inputSize == 1 ? self.view({numDist, numCategories}) : self;
result.resize_({numDist, n_sample});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self_v.scalar_type(), "multinomial_kernel_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto props = at::cuda::getCurrentDeviceProperties();
CUDA_KERNEL_ASSERT(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredWarps = at::cuda::ATenCeilDiv(numCategories, C10_WARP_SIZE);
int requiredThreads = std::min(maxThreads, requiredWarps * C10_WARP_SIZE);
int requiredShared = requiredThreads * sizeof(accscalar_t);
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
Tensor sampled = native::empty_cuda({numDist, n_sample}, optTypeMetaToScalarType(self_v.options().dtype_opt()),
self_v.options().layout_opt(), self_v.options().device_opt(),
self_v.options().pinned_memory_opt());
at::native::uniform_(sampled, 0.0, 1.0, generator);
dim3 block(requiredThreads);
dim3 grid(std::min(static_cast<int>(numDist), numSM * 4));
sampleMultinomialOnce<scalar_t, accscalar_t>
<<<grid, block,
requiredShared,
at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<int64_t>(),
numDist,
numCategories,
sampled.data_ptr<scalar_t>(),
self_v.data_ptr<scalar_t>(),
self_v.stride(0),
self_v.stride(1)
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
Tensor origDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
origDist.copy_(self_v);
Tensor normDist = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor prefixSum = native::empty_like(
self_v,
c10::nullopt /* dtype */,
c10::nullopt /* layout */,
c10::nullopt /* device */,
c10::nullopt /* pin_memory */,
LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// Renorm along rows
normDist.copy_(origDist);
renormRows(normDist);
// Prefix sum along rows
at::cuda::cumsum_out(prefixSum, normDist, 1);
PhiloxCudaState rng_engine_inputs;
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(128);
// Each block will generate a sample from one
// distribution concurrently.
int grid_y=std::min<int>(numDist, at::cuda::getCurrentDeviceProperties()->maxGridSize[1]);
dim3 grid((n_sample-1)/block.x+1, grid_y);
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
// each thread generates a single sample for (numdist/numblocks.y) distributions, however, since we have to use
// curand_uniform4 (See Note [Register spilling in curand call for CUDA < 10]),
// offset is 4 times that.
auto offset = ((numDist-1)/grid.y+1)*4;
rng_engine_inputs = gen->philox_cuda_state(offset);
}
// Sample with replacement
sampleMultinomialWithReplacement
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
rng_engine_inputs,
n_sample,
result.data_ptr<int64_t>(),
numDist, numCategories,
prefixSum.data_ptr<scalar_t>(),
normDist.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
if (inputSize == 1) {
result.resize_({n_sample});
}
}
}
REGISTER_DISPATCH(
multinomial_with_replacement_stub,
&multinomial_with_replacement_kernel_impl);
}}
|