hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
7a0b5dc69554ce363e68bb6d1203e5eb8bf6337c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GistCudaSetup.cuh"
#include "GistCudaCalc.cuh"
#include "EntropyCalculator.cuh"
#include <iostream>
/**
* Allocate memory on the GPU.
* @parameter array: The pointer to the array, which will be allocated on the GPU.
* @parameter size: An integer giving the size of the array, which will be allocated.
* @throws: CudaException if a problem occurs.
*/
__host__
void allocateCuda(void **array, int size) {
// Check if the array is actually free, if not, it will be freed
// (fun fact: checking is not necessary, one could also simply free the memory).
if ((*array) != NULL) {
hipFree(*array);
}
// If something goes wrong, throw exception
if (hipMalloc(array, size) != hipSuccess) {
throw CudaException();
}
}
/**
* Copy memory from the CPU to the GPU.
* @parameter array: The array from which the values shall be copied.
* @parameter array_c: The array on the device, to which the values shall be copied.
* @parameter size: The size of the stuff which will be copied.
* @throws: CudaException if something goes wrong.
*/
__host__
void copyMemoryToDevice(void *array, void *array_c, int size) {
// If something goes wrong, throw exception
// In this case only copying can go wrong.
if (hipMemcpy(array_c, array, size, hipMemcpyHostToDevice) != hipSuccess) {
throw CudaException();
}
}
/**
* A simple helper function that copies a lot of stuff to the GPU (as structs).
* @parameter charge: An array holding the charges for the different atoms.
* @parameter atomtype: An array holding the integers for the atom types of the different atoms.
* @parameter solvent: An array of boolean values, holding the information whether a certain atom is solvent or solute.
* @parameter atomNumber: The total number of atoms.
* @parameter atomProps_c: A pointer to an array on the GPU, which will hold the atom properties.
* @parameter ljA: An array holding the lennard-jones parameter A for each atom type pair.
* @parameter ljB: An array holding the lennard-jones parameter B for each atom type pair.
* @parameter length: The length of the two aforementioned arrays (ljA & ljB).
* @parameter lJparams_c: A pointer to an array on the GPU, which will hold the lj parameters.
* @throws: CudaException if something bad happens.
*/
__host__
void copyMemoryToDeviceStruct(float *charge, int *atomtype, bool *solvent, int *molecule, int atomNumber, void **atomProps_c,
float *ljA, float *ljB, int length, void **lJparams_c) {
// Check if the two arrays are free. Again, this could be removed (but will stay!)
if ((*atomProps_c) != NULL) {
hipFree(*atomProps_c);
}
if ((*lJparams_c) != NULL) {
hipFree(*lJparams_c);
}
// Allocate the necessary memory on the GPU.
if (hipMalloc(atomProps_c, atomNumber * sizeof(AtomProperties)) != hipSuccess) {
throw CudaException();
}
if (hipMalloc(lJparams_c, length * sizeof(ParamsLJ)) != hipSuccess) {
throw CudaException();
}
// Create an array for the lennard-jones parameters.
ParamsLJ *ljp = (ParamsLJ *) malloc (length * sizeof(ParamsLJ));
// Add the lennard-jones parameters to the array.
for (int i = 0; i < length; ++i) {
ljp[i] = ParamsLJ(ljA[i], ljB[i]);
}
// Create an array for the atom properties.
AtomProperties *array = (AtomProperties *)malloc(atomNumber * sizeof(AtomProperties));
// Add the properties into the array.
for (int i = 0; i < atomNumber; ++i) {
array[i] = AtomProperties(charge[i], atomtype[i], solvent[i], molecule[i]);
}
// Copy the memory from the host to the device.
if (hipMemcpy((*atomProps_c), array, atomNumber * sizeof(AtomProperties), hipMemcpyHostToDevice) != hipSuccess) {
throw CudaException();
}
if (hipMemcpy((*lJparams_c), ljp, length * sizeof(ParamsLJ), hipMemcpyHostToDevice) != hipSuccess) {
throw CudaException();
}
// Free the two arrays (so that no memory leak occurs).
free(ljp);
free(array);
}
/**
* Free an array.
* @parameter array: The array you want to free.
*/
__host__
void freeCuda(void *array) {
hipFree(array);
}
// This is coded C-like, but uses exceptions.
/**
* This starts the cuda kernel, thus it is actually a quite long function.
*/
__host__
std::vector<std::vector<float> > doActionCudaEnergy(const double *coords, int *NBindex_c, int ntypes, void *parameter, void *molecule_c,
int boxinfo, float *recip_o_box, float *ucell, int maxAtoms, float *min_c, float *max_c, int headAtomType,
float neighbourCut2, int *result_o, int *result_n, float *result_w_c, float *result_s_c,
int *result_O_c, int *result_N_c, bool doorder) {
Test *coords_c = NULL;
float *recip_b_c = NULL;
float *ucell_c = NULL;
float *result_A = (float *) calloc(maxAtoms, sizeof(float));
float *result_s = (float *) calloc(maxAtoms, sizeof(float));
// TODO: Fix this, test is actually a quite bad name here!
Test *coord_array = (Test *) calloc(maxAtoms, sizeof(Test));
// Casting
AtomProperties *sender = (AtomProperties *) molecule_c;
ParamsLJ *lennardJonesParams = (ParamsLJ *) parameter;
// Create Boxinfo and Unit cell. This is actually very important for the speed (otherwise
// there would be LOTS of access to non-local variables).
BoxInfo boxinf;
if (boxinfo != 0) {
boxinf = BoxInfo(recip_o_box, boxinfo);
}
UnitCell ucellN;
if (boxinfo == 2) {
ucellN = UnitCell(ucell);
}
// Add the coordinates to the array.
// TODO: Fix Test here also!
for (int i = 0; i < maxAtoms; ++i) {
coord_array[i] = Test(&coords[i * 3]);
}
// vectors that will return the necessary information.
std::vector<std::vector<float> > result;
std::vector<float> result_esw;
std::vector<float> result_eww;
// Allocate space on the GPU
if (hipMalloc(&coords_c, maxAtoms * sizeof(Test)) != hipSuccess) {
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
// Copy the data to the GPU
if (hipMemcpy(coords_c, coord_array, maxAtoms * sizeof(Test), hipMemcpyHostToDevice) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (hipMemcpy(result_w_c, result_A, maxAtoms * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (hipMemcpy(result_s_c, result_s, maxAtoms * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
// If the doorder calculation is used, it needs to calculate everything differently, so the slow version is used
// (this is about 10% slower).
if (doorder) {
hipLaunchKernelGGL(( cudaCalcEnergySlow), dim3((maxAtoms + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE), dim3(SLOW_BLOCKSIZE) , 0, 0, coords_c, NBindex_c, ntypes, lennardJonesParams, sender,
boxinf, ucellN, maxAtoms, result_w_c, result_s_c, min_c, max_c,
headAtomType, neighbourCut2, result_O_c, result_N_c);
} else {
// Uses a 2D array, which is nice for memory access.
dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE);
dim3 numBlocks((maxAtoms + threadsPerBlock.x) / threadsPerBlock.x, (maxAtoms + threadsPerBlock.y) / threadsPerBlock.y);
// The actual call of the device function
hipLaunchKernelGGL(( cudaCalcEnergy), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, coords_c, NBindex_c, ntypes, lennardJonesParams, sender,
boxinf, ucellN, maxAtoms, result_w_c, result_s_c, min_c, max_c,
headAtomType, neighbourCut2, result_O_c, result_N_c);
// Check if there was an error.
hipError_t hipError_t = hipGetLastError();
if (hipError_t != hipSuccess) {
printf("returned %s\n", hipGetErrorString(hipError_t));
}
}
// Return the results of the calculation to the main memory
if (hipMemcpy(result_A, result_w_c, maxAtoms * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (hipMemcpy(result_s, result_s_c, maxAtoms * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (hipMemcpy(result_o, result_O_c, maxAtoms * 4 * sizeof(int), hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (hipMemcpy(result_n, result_N_c, maxAtoms * sizeof(int), hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
for (int i = 0; i < maxAtoms; ++i) {
result_eww.push_back(result_A[i]);
result_esw.push_back(result_s[i]);
}
result.push_back(result_eww);
result.push_back(result_esw);
// Free everything used in here.
hipFree(coords_c); hipFree(recip_b_c); hipFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
return result;
}
#ifdef DEBUG_GIST_CUDA
// Not necessary
__host__
std::vector<Quaternion<float>> shoveQuaternionsTest(std::vector<Quaternion<float> > quats) {
QuaternionG<float> *quats_c = NULL;
float *ret_c = NULL;
std::vector<Quaternion<float> > ret;
float *ret_f = new float[quats.size() * 4];
QuaternionG<float> *quats_f = new QuaternionG<float>[quats.size()];
for (int i = 0; i < quats.size(); ++i) {
quats_f[i] = quats.at(i);
}
if (hipMalloc(&quats_c, quats.size() * sizeof(QuaternionG<float>)) != hipSuccess) {
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
if (hipMalloc(&ret_c, quats.size() * 4 * sizeof(float)) != hipSuccess) {
hipFree(quats_c);
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
if (hipMemcpy(quats_c, quats_f, quats.size() * sizeof(QuaternionG<float>), hipMemcpyHostToDevice) != hipSuccess) {
hipFree(quats_c); hipFree(ret_c);
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
hipLaunchKernelGGL(( shoveQuaternions), dim3((quats.size() + BLOCKSIZE) / BLOCKSIZE), dim3(BLOCKSIZE) , 0, 0, quats_c, quats.size(), ret_c);
if (hipMemcpy(ret_f, ret_c, quats.size() * 4 * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(quats_c); hipFree(ret_c);
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
for (int i = 0; i < quats.size(); ++i) {
ret.push_back(Quaternion<float>(ret_f[i * 4], ret_f[i * 4 + 1], ret_f[i * 4 + 2], ret_f[i * 4 + 3]));
}
hipFree(quats_c); hipFree(ret_c);
delete[] quats_f; delete[] ret_f;
return ret;
}
#endif
/**
* Calculates the entropy on the GPU (this is not really necessary and does not lead to a significant speed up).
* @parameter coords: The coordinates of the different water molecules.
* @parameter x: The number of grid voxels in the x direction.
* @parameter y: The number of grid voxels in the y direction.
* @parameter z: The number of grid voxels in the z direction.
* @parameter quats: A vector object holding all the quaternions.
* @parameter temp: The temperature.
* @parameter rho0: The reference density.
* @parameter nFrames: The total number of frames.
* @return: A vector holding the values for dTStrans, dTSorient and dTSsix.
* @throws: A CudaException on error.
*/
std::vector<std::vector<float> > doActionCudaEntropy(std::vector<std::vector<Vec3> > coords, int x, int y, int z, std::vector<std::vector<Quaternion<float> >> quats, float temp, float rho0, int nFrames) {
// For the CPU
// Input (from previous calculations)
std::vector<QuaternionG<float> > quatsF;
std::vector<float> coordsF;
std::vector<int> cumSumAtoms;
// Results
float *resultTStrans = new float[quats.size()];
float *resultTSorient = new float[quats.size()];
float *resultTSsix = new float[quats.size()];
// For the GPU
// Input (from previous calculations)
Dimensions dims = Dimensions(x, y, z);
float *coordsG = NULL;
QuaternionG<float> *quatsG = NULL;
int *cumSumAtomsG = NULL;
// Results
float *resultTStransG = NULL;
float *resultTSorientG = NULL;
float *resultTSsixG = NULL;
int sum = 0;
for (int i = 0 ; i < quats.size(); ++i) {
sum += quats.at(i).size();
cumSumAtoms.push_back(sum);
for (int j = 0; j < quats.at(i).size(); ++j) {
// quatsF always has the size of the number of the current molecule.
coordsF.push_back((float) (coords.at(i).at(j)[0]));
coordsF.push_back((float) (coords.at(i).at(j)[1]));
coordsF.push_back((float) (coords.at(i).at(j)[2]));
quatsF.push_back(quats.at(i).at(j));
}
}
hipError_t err1 = hipMalloc(&quatsG, quatsF.size() * sizeof(QuaternionG<float>));
hipError_t err2 = hipMalloc(&coordsG, coordsF.size() * sizeof(float));
hipError_t err3 = hipMalloc(&cumSumAtomsG, cumSumAtoms.size() * sizeof(int));
hipError_t err4 = hipMalloc(&resultTStransG, quats.size() * sizeof(float));
hipError_t err5 = hipMalloc(&resultTSorientG, quats.size() * sizeof(float));
hipError_t err6 = hipMalloc(&resultTSsixG, quats.size() * sizeof(float));
// Error Check
if (err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess ||
err4 != hipSuccess || err5 != hipSuccess || err6 != hipSuccess) {
hipFree(quatsG);
hipFree(coordsG);
hipFree(cumSumAtomsG);
hipFree(resultTStransG);
hipFree(resultTSorientG);
hipFree(resultTSsixG);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
err1 = hipMemcpy(quatsG, &(quatsF[0]), quatsF.size() * sizeof(QuaternionG<float>), hipMemcpyHostToDevice);
err2 = hipMemcpy(coordsG, &(coordsF[0]), coordsF.size() * sizeof(float), hipMemcpyHostToDevice);
err3 = hipMemcpy(cumSumAtomsG, &(cumSumAtoms[0]), cumSumAtoms.size() * sizeof(int), hipMemcpyHostToDevice);
// Error Check
if (err1 != hipSuccess || err2 != hipSuccess || err3 != hipSuccess) {
hipFree(quatsG);
hipFree(coordsG);
hipFree(cumSumAtomsG);
hipFree(resultTStransG);
hipFree(resultTSorientG);
hipFree(resultTSsixG);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
EntropyCalculator entCalc = EntropyCalculator(quatsG, coordsG, dims, cumSumAtomsG, temp, rho0, nFrames);
hipLaunchKernelGGL(( calculateEntropy), dim3((quats.size() + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE), dim3(SLOW_BLOCKSIZE), 0, 0, entCalc, resultTStransG, resultTSorientG, resultTSsixG);
hipError_t err7 = hipGetLastError();
// Error Check
if (err7 != hipSuccess) {
hipFree(quatsG);
hipFree(coordsG);
hipFree(cumSumAtomsG);
hipFree(resultTStransG);
hipFree(resultTSorientG);
hipFree(resultTSsixG);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
// Copy back, use same errors as above for understandability.
err4 = hipMemcpy(resultTStrans, resultTStransG, quats.size() * sizeof(float), hipMemcpyDeviceToHost);
err5 = hipMemcpy(resultTSorient, resultTSorientG, quats.size() * sizeof(float), hipMemcpyDeviceToHost);
err6 = hipMemcpy(resultTSsix, resultTSsixG, quats.size() * sizeof(float), hipMemcpyDeviceToHost);
// Don't need that anymore.
hipFree(quatsG);
hipFree(coordsG);
hipFree(cumSumAtomsG);
hipFree(resultTStransG);
hipFree(resultTSorientG);
hipFree(resultTSsixG);
// Error Check
if (err4 != hipSuccess || err5 != hipSuccess || err6 != hipSuccess) {
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
std::vector<float> trans;
std::vector<float> orient;
std::vector<float> six;
for (int i = 0; i < quats.size(); ++i) {
trans.push_back(resultTStrans[i]);
orient.push_back(resultTSorient[i]);
six.push_back(resultTSsix[i]);
}
std::vector<std::vector<float> > ret;
ret.push_back(trans);
ret.push_back(orient);
ret.push_back(six);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
return ret;
} | 7a0b5dc69554ce363e68bb6d1203e5eb8bf6337c.cu | #include "GistCudaSetup.cuh"
#include "GistCudaCalc.cuh"
#include "EntropyCalculator.cuh"
#include <iostream>
/**
* Allocate memory on the GPU.
* @parameter array: The pointer to the array, which will be allocated on the GPU.
* @parameter size: An integer giving the size of the array, which will be allocated.
* @throws: CudaException if a problem occurs.
*/
__host__
void allocateCuda(void **array, int size) {
// Check if the array is actually free, if not, it will be freed
// (fun fact: checking is not necessary, one could also simply free the memory).
if ((*array) != NULL) {
cudaFree(*array);
}
// If something goes wrong, throw exception
if (cudaMalloc(array, size) != cudaSuccess) {
throw CudaException();
}
}
/**
* Copy memory from the CPU to the GPU.
* @parameter array: The array from which the values shall be copied.
* @parameter array_c: The array on the device, to which the values shall be copied.
* @parameter size: The size of the stuff which will be copied.
* @throws: CudaException if something goes wrong.
*/
__host__
void copyMemoryToDevice(void *array, void *array_c, int size) {
// If something goes wrong, throw exception
// In this case only copying can go wrong.
if (cudaMemcpy(array_c, array, size, cudaMemcpyHostToDevice) != cudaSuccess) {
throw CudaException();
}
}
/**
* A simple helper function that copies a lot of stuff to the GPU (as structs).
* @parameter charge: An array holding the charges for the different atoms.
* @parameter atomtype: An array holding the integers for the atom types of the different atoms.
* @parameter solvent: An array of boolean values, holding the information whether a certain atom is solvent or solute.
* @parameter atomNumber: The total number of atoms.
* @parameter atomProps_c: A pointer to an array on the GPU, which will hold the atom properties.
* @parameter ljA: An array holding the lennard-jones parameter A for each atom type pair.
* @parameter ljB: An array holding the lennard-jones parameter B for each atom type pair.
* @parameter length: The length of the two aforementioned arrays (ljA & ljB).
* @parameter lJparams_c: A pointer to an array on the GPU, which will hold the lj parameters.
* @throws: CudaException if something bad happens.
*/
__host__
void copyMemoryToDeviceStruct(float *charge, int *atomtype, bool *solvent, int *molecule, int atomNumber, void **atomProps_c,
float *ljA, float *ljB, int length, void **lJparams_c) {
// Check if the two arrays are free. Again, this could be removed (but will stay!)
if ((*atomProps_c) != NULL) {
cudaFree(*atomProps_c);
}
if ((*lJparams_c) != NULL) {
cudaFree(*lJparams_c);
}
// Allocate the necessary memory on the GPU.
if (cudaMalloc(atomProps_c, atomNumber * sizeof(AtomProperties)) != cudaSuccess) {
throw CudaException();
}
if (cudaMalloc(lJparams_c, length * sizeof(ParamsLJ)) != cudaSuccess) {
throw CudaException();
}
// Create an array for the lennard-jones parameters.
ParamsLJ *ljp = (ParamsLJ *) malloc (length * sizeof(ParamsLJ));
// Add the lennard-jones parameters to the array.
for (int i = 0; i < length; ++i) {
ljp[i] = ParamsLJ(ljA[i], ljB[i]);
}
// Create an array for the atom properties.
AtomProperties *array = (AtomProperties *)malloc(atomNumber * sizeof(AtomProperties));
// Add the properties into the array.
for (int i = 0; i < atomNumber; ++i) {
array[i] = AtomProperties(charge[i], atomtype[i], solvent[i], molecule[i]);
}
// Copy the memory from the host to the device.
if (cudaMemcpy((*atomProps_c), array, atomNumber * sizeof(AtomProperties), cudaMemcpyHostToDevice) != cudaSuccess) {
throw CudaException();
}
if (cudaMemcpy((*lJparams_c), ljp, length * sizeof(ParamsLJ), cudaMemcpyHostToDevice) != cudaSuccess) {
throw CudaException();
}
// Free the two arrays (so that no memory leak occurs).
free(ljp);
free(array);
}
/**
* Free an array.
* @parameter array: The array you want to free.
*/
__host__
void freeCuda(void *array) {
cudaFree(array);
}
// This is coded C-like, but uses exceptions.
/**
* This starts the cuda kernel, thus it is actually a quite long function.
*/
__host__
std::vector<std::vector<float> > doActionCudaEnergy(const double *coords, int *NBindex_c, int ntypes, void *parameter, void *molecule_c,
int boxinfo, float *recip_o_box, float *ucell, int maxAtoms, float *min_c, float *max_c, int headAtomType,
float neighbourCut2, int *result_o, int *result_n, float *result_w_c, float *result_s_c,
int *result_O_c, int *result_N_c, bool doorder) {
Test *coords_c = NULL;
float *recip_b_c = NULL;
float *ucell_c = NULL;
float *result_A = (float *) calloc(maxAtoms, sizeof(float));
float *result_s = (float *) calloc(maxAtoms, sizeof(float));
// TODO: Fix this, test is actually a quite bad name here!
Test *coord_array = (Test *) calloc(maxAtoms, sizeof(Test));
// Casting
AtomProperties *sender = (AtomProperties *) molecule_c;
ParamsLJ *lennardJonesParams = (ParamsLJ *) parameter;
// Create Boxinfo and Unit cell. This is actually very important for the speed (otherwise
// there would be LOTS of access to non-local variables).
BoxInfo boxinf;
if (boxinfo != 0) {
boxinf = BoxInfo(recip_o_box, boxinfo);
}
UnitCell ucellN;
if (boxinfo == 2) {
ucellN = UnitCell(ucell);
}
// Add the coordinates to the array.
// TODO: Fix Test here also!
for (int i = 0; i < maxAtoms; ++i) {
coord_array[i] = Test(&coords[i * 3]);
}
// vectors that will return the necessary information.
std::vector<std::vector<float> > result;
std::vector<float> result_esw;
std::vector<float> result_eww;
// Allocate space on the GPU
if (cudaMalloc(&coords_c, maxAtoms * sizeof(Test)) != cudaSuccess) {
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
// Copy the data to the GPU
if (cudaMemcpy(coords_c, coord_array, maxAtoms * sizeof(Test), cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (cudaMemcpy(result_w_c, result_A, maxAtoms * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (cudaMemcpy(result_s_c, result_s, maxAtoms * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
// If the doorder calculation is used, it needs to calculate everything differently, so the slow version is used
// (this is about 10% slower).
if (doorder) {
cudaCalcEnergySlow<<< (maxAtoms + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE, SLOW_BLOCKSIZE >>> (coords_c, NBindex_c, ntypes, lennardJonesParams, sender,
boxinf, ucellN, maxAtoms, result_w_c, result_s_c, min_c, max_c,
headAtomType, neighbourCut2, result_O_c, result_N_c);
} else {
// Uses a 2D array, which is nice for memory access.
dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE);
dim3 numBlocks((maxAtoms + threadsPerBlock.x) / threadsPerBlock.x, (maxAtoms + threadsPerBlock.y) / threadsPerBlock.y);
// The actual call of the device function
cudaCalcEnergy<<<numBlocks, threadsPerBlock>>> (coords_c, NBindex_c, ntypes, lennardJonesParams, sender,
boxinf, ucellN, maxAtoms, result_w_c, result_s_c, min_c, max_c,
headAtomType, neighbourCut2, result_O_c, result_N_c);
// Check if there was an error.
cudaError_t cudaError = cudaGetLastError();
if (cudaError != cudaSuccess) {
printf("returned %s\n", cudaGetErrorString(cudaError));
}
}
// Return the results of the calculation to the main memory
if (cudaMemcpy(result_A, result_w_c, maxAtoms * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (cudaMemcpy(result_s, result_s_c, maxAtoms * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (cudaMemcpy(result_o, result_O_c, maxAtoms * 4 * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
if (cudaMemcpy(result_n, result_N_c, maxAtoms * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
throw CudaException();
}
for (int i = 0; i < maxAtoms; ++i) {
result_eww.push_back(result_A[i]);
result_esw.push_back(result_s[i]);
}
result.push_back(result_eww);
result.push_back(result_esw);
// Free everything used in here.
cudaFree(coords_c); cudaFree(recip_b_c); cudaFree(ucell_c);
free(result_A); free(result_s); free(coord_array);
return result;
}
#ifdef DEBUG_GIST_CUDA
// Not necessary
__host__
std::vector<Quaternion<float>> shoveQuaternionsTest(std::vector<Quaternion<float> > quats) {
QuaternionG<float> *quats_c = NULL;
float *ret_c = NULL;
std::vector<Quaternion<float> > ret;
float *ret_f = new float[quats.size() * 4];
QuaternionG<float> *quats_f = new QuaternionG<float>[quats.size()];
for (int i = 0; i < quats.size(); ++i) {
quats_f[i] = quats.at(i);
}
if (cudaMalloc(&quats_c, quats.size() * sizeof(QuaternionG<float>)) != cudaSuccess) {
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
if (cudaMalloc(&ret_c, quats.size() * 4 * sizeof(float)) != cudaSuccess) {
cudaFree(quats_c);
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
if (cudaMemcpy(quats_c, quats_f, quats.size() * sizeof(QuaternionG<float>), cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(quats_c); cudaFree(ret_c);
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
shoveQuaternions<<< (quats.size() + BLOCKSIZE) / BLOCKSIZE, BLOCKSIZE >>> (quats_c, quats.size(), ret_c);
if (cudaMemcpy(ret_f, ret_c, quats.size() * 4 * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(quats_c); cudaFree(ret_c);
delete[] quats_f; delete[] ret_f;
throw CudaException();
}
for (int i = 0; i < quats.size(); ++i) {
ret.push_back(Quaternion<float>(ret_f[i * 4], ret_f[i * 4 + 1], ret_f[i * 4 + 2], ret_f[i * 4 + 3]));
}
cudaFree(quats_c); cudaFree(ret_c);
delete[] quats_f; delete[] ret_f;
return ret;
}
#endif
/**
* Calculates the entropy on the GPU (this is not really necessary and does not lead to a significant speed up).
* @parameter coords: The coordinates of the different water molecules.
* @parameter x: The number of grid voxels in the x direction.
* @parameter y: The number of grid voxels in the y direction.
* @parameter z: The number of grid voxels in the z direction.
* @parameter quats: A vector object holding all the quaternions.
* @parameter temp: The temperature.
* @parameter rho0: The reference density.
* @parameter nFrames: The total number of frames.
* @return: A vector holding the values for dTStrans, dTSorient and dTSsix.
* @throws: A CudaException on error.
*/
std::vector<std::vector<float> > doActionCudaEntropy(std::vector<std::vector<Vec3> > coords, int x, int y, int z, std::vector<std::vector<Quaternion<float> >> quats, float temp, float rho0, int nFrames) {
// For the CPU
// Input (from previous calculations)
std::vector<QuaternionG<float> > quatsF;
std::vector<float> coordsF;
std::vector<int> cumSumAtoms;
// Results
float *resultTStrans = new float[quats.size()];
float *resultTSorient = new float[quats.size()];
float *resultTSsix = new float[quats.size()];
// For the GPU
// Input (from previous calculations)
Dimensions dims = Dimensions(x, y, z);
float *coordsG = NULL;
QuaternionG<float> *quatsG = NULL;
int *cumSumAtomsG = NULL;
// Results
float *resultTStransG = NULL;
float *resultTSorientG = NULL;
float *resultTSsixG = NULL;
int sum = 0;
for (int i = 0 ; i < quats.size(); ++i) {
sum += quats.at(i).size();
cumSumAtoms.push_back(sum);
for (int j = 0; j < quats.at(i).size(); ++j) {
// quatsF always has the size of the number of the current molecule.
coordsF.push_back((float) (coords.at(i).at(j)[0]));
coordsF.push_back((float) (coords.at(i).at(j)[1]));
coordsF.push_back((float) (coords.at(i).at(j)[2]));
quatsF.push_back(quats.at(i).at(j));
}
}
cudaError_t err1 = cudaMalloc(&quatsG, quatsF.size() * sizeof(QuaternionG<float>));
cudaError_t err2 = cudaMalloc(&coordsG, coordsF.size() * sizeof(float));
cudaError_t err3 = cudaMalloc(&cumSumAtomsG, cumSumAtoms.size() * sizeof(int));
cudaError_t err4 = cudaMalloc(&resultTStransG, quats.size() * sizeof(float));
cudaError_t err5 = cudaMalloc(&resultTSorientG, quats.size() * sizeof(float));
cudaError_t err6 = cudaMalloc(&resultTSsixG, quats.size() * sizeof(float));
// Error Check
if (err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess ||
err4 != cudaSuccess || err5 != cudaSuccess || err6 != cudaSuccess) {
cudaFree(quatsG);
cudaFree(coordsG);
cudaFree(cumSumAtomsG);
cudaFree(resultTStransG);
cudaFree(resultTSorientG);
cudaFree(resultTSsixG);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
err1 = cudaMemcpy(quatsG, &(quatsF[0]), quatsF.size() * sizeof(QuaternionG<float>), cudaMemcpyHostToDevice);
err2 = cudaMemcpy(coordsG, &(coordsF[0]), coordsF.size() * sizeof(float), cudaMemcpyHostToDevice);
err3 = cudaMemcpy(cumSumAtomsG, &(cumSumAtoms[0]), cumSumAtoms.size() * sizeof(int), cudaMemcpyHostToDevice);
// Error Check
if (err1 != cudaSuccess || err2 != cudaSuccess || err3 != cudaSuccess) {
cudaFree(quatsG);
cudaFree(coordsG);
cudaFree(cumSumAtomsG);
cudaFree(resultTStransG);
cudaFree(resultTSorientG);
cudaFree(resultTSsixG);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
EntropyCalculator entCalc = EntropyCalculator(quatsG, coordsG, dims, cumSumAtomsG, temp, rho0, nFrames);
calculateEntropy<<<(quats.size() + SLOW_BLOCKSIZE) / SLOW_BLOCKSIZE, SLOW_BLOCKSIZE>>>(entCalc, resultTStransG, resultTSorientG, resultTSsixG);
cudaError_t err7 = cudaGetLastError();
// Error Check
if (err7 != cudaSuccess) {
cudaFree(quatsG);
cudaFree(coordsG);
cudaFree(cumSumAtomsG);
cudaFree(resultTStransG);
cudaFree(resultTSorientG);
cudaFree(resultTSsixG);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
// Copy back, use same errors as above for understandability.
err4 = cudaMemcpy(resultTStrans, resultTStransG, quats.size() * sizeof(float), cudaMemcpyDeviceToHost);
err5 = cudaMemcpy(resultTSorient, resultTSorientG, quats.size() * sizeof(float), cudaMemcpyDeviceToHost);
err6 = cudaMemcpy(resultTSsix, resultTSsixG, quats.size() * sizeof(float), cudaMemcpyDeviceToHost);
// Don't need that anymore.
cudaFree(quatsG);
cudaFree(coordsG);
cudaFree(cumSumAtomsG);
cudaFree(resultTStransG);
cudaFree(resultTSorientG);
cudaFree(resultTSsixG);
// Error Check
if (err4 != cudaSuccess || err5 != cudaSuccess || err6 != cudaSuccess) {
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
throw CudaException();
}
std::vector<float> trans;
std::vector<float> orient;
std::vector<float> six;
for (int i = 0; i < quats.size(); ++i) {
trans.push_back(resultTStrans[i]);
orient.push_back(resultTSorient[i]);
six.push_back(resultTSsix[i]);
}
std::vector<std::vector<float> > ret;
ret.push_back(trans);
ret.push_back(orient);
ret.push_back(six);
delete[] resultTStrans;
delete[] resultTSorient;
delete[] resultTSsix;
return ret;
} |
e7eaa1ebd925e766e377e5676ebb4bc45bbee0ac.hip | // !!! This is a file automatically generated by hipify!!!
/* Author Dunzhu Li [email protected]
*/
#include"cpml.h"
#include"../gpu.h"
#include<cstdio>
CPML::CPML(int deviceid, CPML &cpml)
{
npml=cpml.npml;
nx=cpml.nx;
nz=cpml.nz;
pml_dt=cpml.pml_dt;
pml_r=cpml.pml_r;
pml_v=cpml.pml_v;
pml_fc=cpml.pml_fc;
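// ALLOC_CPML allocates a device buffer for one CPML field and copies the matching host-side array onto the device.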
#define ALLOC_CPML(psi,comp,n)\
safecall(hipMalloc((void**)&psi.comp,sizeof(float)*n));\
safecall(hipMemcpy(psi.comp,cpml.psi.comp,sizeof(float)*n,hipMemcpyHostToDevice));
ALLOC_CPML(psi,Txx_x,2*npml*nz);
ALLOC_CPML(psi,Txz_x,2*npml*nz);
ALLOC_CPML(psi, U_x,2*npml*nz);
ALLOC_CPML(psi, W_x,2*npml*nz);
ALLOC_CPML(psi,Tzz_z,2*npml*nx);
ALLOC_CPML(psi,Txz_z,2*npml*nx);
ALLOC_CPML(psi, U_z,2*npml*nx);
ALLOC_CPML(psi, W_z,2*npml*nx);
ALLOC_CPML(b,Txx_x,nx);
ALLOC_CPML(b,Txz_x,nx);
ALLOC_CPML(b, U_x,nx);
ALLOC_CPML(b, W_x,nx);
ALLOC_CPML(b,Tzz_z,nz);
ALLOC_CPML(b,Txz_z,nz);
ALLOC_CPML(b, U_z,nz);
ALLOC_CPML(b, W_z,nz);
ALLOC_CPML(c,Txx_x,nx);
ALLOC_CPML(c,Txz_x,nx);
ALLOC_CPML(c, U_x,nx);
ALLOC_CPML(c, W_x,nx);
ALLOC_CPML(c,Tzz_z,nz);
ALLOC_CPML(c,Txz_z,nz);
ALLOC_CPML(c, U_z,nz);
ALLOC_CPML(c, W_z,nz);
ALLOC_CPML(k,Txx_x,nx);
ALLOC_CPML(k,Txz_x,nx);
ALLOC_CPML(k, U_x,nx);
ALLOC_CPML(k, W_x,nx);
ALLOC_CPML(k,Tzz_z,nz);
ALLOC_CPML(k,Txz_z,nz);
ALLOC_CPML(k, U_z,nz);
ALLOC_CPML(k, W_z,nz);
/*
P is the position of \partial{U}/\partial{x}, U_x,
P-------U P------U--------
| |
| |
| |
| |
0:pml_pos nx-1-pml_pos:nx-1
distance from boundary
pml_pos-i i-(nx-1-pml_pos)
for example, nx=100
looks from U grid, pml boundary is at position 11, 0 : 11, 100-1-11 : 100-1
looks from P grid, pml boundary is at position 11.5, 0 : 11,
W---------Txz
| |
| |
| |
Txx(Tzz)---U ------ pml boundary
|
|
|
|
pml boundary
0------npml-0.5
*/
}
void CPML::cu_load_restart(int deviceid,CPML &cpml)
{
#define LOAD_CPML(psi,comp,n)\
safecall(hipMemcpy(psi.comp,cpml.psi.comp,sizeof(float)*n,hipMemcpyHostToDevice));
LOAD_CPML(psi,Txx_x,2*npml*nz);
LOAD_CPML(psi,Txz_x,2*npml*nz);
LOAD_CPML(psi, U_x,2*npml*nz);
LOAD_CPML(psi, W_x,2*npml*nz);
LOAD_CPML(psi,Tzz_z,2*npml*nx);
LOAD_CPML(psi,Txz_z,2*npml*nx);
LOAD_CPML(psi, U_z,2*npml*nx);
LOAD_CPML(psi, W_z,2*npml*nx);
}
void CPML::cu_save_state(int deviceid,CPML &cpml)
{
#define CPYBACK_CPML(psi,comp,n)\
safecall(hipMemcpy(cpml.psi.comp,psi.comp,sizeof(float)*n,hipMemcpyDeviceToHost));
CPYBACK_CPML(psi,Txx_x,2*npml*nz);
CPYBACK_CPML(psi,Txz_x,2*npml*nz);
CPYBACK_CPML(psi, U_x,2*npml*nz);
CPYBACK_CPML(psi, W_x,2*npml*nz);
CPYBACK_CPML(psi,Tzz_z,2*npml*nx);
CPYBACK_CPML(psi,Txz_z,2*npml*nx);
CPYBACK_CPML(psi, U_z,2*npml*nx);
CPYBACK_CPML(psi, W_z,2*npml*nx);
}
| e7eaa1ebd925e766e377e5676ebb4bc45bbee0ac.cu | /* Author Dunzhu Li [email protected]
*/
#include"cpml.h"
#include"../gpu.h"
#include<cstdio>
CPML::CPML(int deviceid, CPML &cpml)
{
npml=cpml.npml;
nx=cpml.nx;
nz=cpml.nz;
pml_dt=cpml.pml_dt;
pml_r=cpml.pml_r;
pml_v=cpml.pml_v;
pml_fc=cpml.pml_fc;
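// ALLOC_CPML allocates a device buffer for one CPML field and copies the matching host-side array onto the device.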
#define ALLOC_CPML(psi,comp,n)\
safecall(cudaMalloc((void**)&psi.comp,sizeof(float)*n));\
safecall(cudaMemcpy(psi.comp,cpml.psi.comp,sizeof(float)*n,cudaMemcpyHostToDevice));
ALLOC_CPML(psi,Txx_x,2*npml*nz);
ALLOC_CPML(psi,Txz_x,2*npml*nz);
ALLOC_CPML(psi, U_x,2*npml*nz);
ALLOC_CPML(psi, W_x,2*npml*nz);
ALLOC_CPML(psi,Tzz_z,2*npml*nx);
ALLOC_CPML(psi,Txz_z,2*npml*nx);
ALLOC_CPML(psi, U_z,2*npml*nx);
ALLOC_CPML(psi, W_z,2*npml*nx);
ALLOC_CPML(b,Txx_x,nx);
ALLOC_CPML(b,Txz_x,nx);
ALLOC_CPML(b, U_x,nx);
ALLOC_CPML(b, W_x,nx);
ALLOC_CPML(b,Tzz_z,nz);
ALLOC_CPML(b,Txz_z,nz);
ALLOC_CPML(b, U_z,nz);
ALLOC_CPML(b, W_z,nz);
ALLOC_CPML(c,Txx_x,nx);
ALLOC_CPML(c,Txz_x,nx);
ALLOC_CPML(c, U_x,nx);
ALLOC_CPML(c, W_x,nx);
ALLOC_CPML(c,Tzz_z,nz);
ALLOC_CPML(c,Txz_z,nz);
ALLOC_CPML(c, U_z,nz);
ALLOC_CPML(c, W_z,nz);
ALLOC_CPML(k,Txx_x,nx);
ALLOC_CPML(k,Txz_x,nx);
ALLOC_CPML(k, U_x,nx);
ALLOC_CPML(k, W_x,nx);
ALLOC_CPML(k,Tzz_z,nz);
ALLOC_CPML(k,Txz_z,nz);
ALLOC_CPML(k, U_z,nz);
ALLOC_CPML(k, W_z,nz);
/*
P is the position of \partial{U}/\partial{x}, U_x,
P-------U P------U--------
| |
| |
| |
| |
0:pml_pos nx-1-pml_pos:nx-1
distance from boundary
pml_pos-i i-(nx-1-pml_pos)
for example, nx=100
looks from U grid, pml boundary is at position 11, 0 : 11, 100-1-11 : 100-1
looks from P grid, pml boundary is at position 11.5, 0 : 11,
W---------Txz
| |
| |
| |
Txx(Tzz)---U ------ pml boundary
|
|
|
|
pml boundary
0------npml-0.5
*/
}
void CPML::cu_load_restart(int deviceid,CPML &cpml)
{
#define LOAD_CPML(psi,comp,n)\
safecall(cudaMemcpy(psi.comp,cpml.psi.comp,sizeof(float)*n,cudaMemcpyHostToDevice));
LOAD_CPML(psi,Txx_x,2*npml*nz);
LOAD_CPML(psi,Txz_x,2*npml*nz);
LOAD_CPML(psi, U_x,2*npml*nz);
LOAD_CPML(psi, W_x,2*npml*nz);
LOAD_CPML(psi,Tzz_z,2*npml*nx);
LOAD_CPML(psi,Txz_z,2*npml*nx);
LOAD_CPML(psi, U_z,2*npml*nx);
LOAD_CPML(psi, W_z,2*npml*nx);
}
void CPML::cu_save_state(int deviceid,CPML &cpml)
{
#define CPYBACK_CPML(psi,comp,n)\
safecall(cudaMemcpy(cpml.psi.comp,psi.comp,sizeof(float)*n,cudaMemcpyDeviceToHost));
CPYBACK_CPML(psi,Txx_x,2*npml*nz);
CPYBACK_CPML(psi,Txz_x,2*npml*nz);
CPYBACK_CPML(psi, U_x,2*npml*nz);
CPYBACK_CPML(psi, W_x,2*npml*nz);
CPYBACK_CPML(psi,Tzz_z,2*npml*nx);
CPYBACK_CPML(psi,Txz_z,2*npml*nx);
CPYBACK_CPML(psi, U_z,2*npml*nx);
CPYBACK_CPML(psi, W_z,2*npml*nx);
}
|
e2dff4e88c11b7033086fd1ec895fe42d3bf9f4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/non_zero_impl.cuh"
#include <hipcub/hipcub.hpp>
constexpr size_t kNonZeroMaxDim = 10;
struct TensorShape {
size_t data[kNonZeroMaxDim];
size_t rank;
};
template <typename DataType>
struct IsZero {
__host__ __device__ __forceinline__ size_t operator()(const DataType &x) const { return x == DataType(0) ? 0 : 1; }
};
// Inspired by cub library
template <typename IndexType>
class NonZeroOutputIterator {
public:
// Required iterator traits
typedef NonZeroOutputIterator self_type;
typedef std::ptrdiff_t difference_type;
typedef void value_type;
typedef void *pointer;
typedef IndexType &reference;
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef
typename thrust::detail::iterator_facade_category<thrust::any_system_tag, thrust::random_access_traversal_tag,
value_type,
reference>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
NonZeroOutputIterator(IndexType *ptr, size_t rank) : ptr_(ptr), rank_(rank) {}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const {
// To avoid data conflict in NonZeroKernel.
return *(ptr_ + rank_ * n);
}
private:
IndexType *ptr_;
const size_t rank_;
};
template <typename IndexType>
__global__ void NonZeroKernel(IndexType *output_ptr, const size_t *output_size_ptr, const TensorShape shape) {
for (size_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < *output_size_ptr; tid += blockDim.x * gridDim.x) {
size_t fill_value = output_ptr[tid * shape.rank];
for (size_t i = 0, j = shape.rank, k = (tid + 1) * shape.rank; i < shape.rank; ++i) {
size_t base = shape.data[--j];
output_ptr[--k] = fill_value % base;
fill_value /= base;
}
}
}
template <typename DataType, typename IndexType>
void NonZero(const DataType *input_ptr, IndexType *output_ptr, size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream) {
// Set the index (1-D base) for non-zero elements and place them into output.
// To support in place operation later, we use custom output iterator,
// which is inspired by cub library. And output_size_ptr stores the number of non-zero elements.
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
hipcub::CountingInputIterator<IndexType> count_iter(0);
hipcub::TransformInputIterator<size_t, IsZero<DataType>, const DataType *> trans_iter(input_ptr, IsZero<DataType>());
NonZeroOutputIterator<IndexType> out_iter(output_ptr, input_shape.size());
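// The first Flagged call passes a null temp-storage pointer and only computes the required temp_storage_bytes; the second call performs the actual selection.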
(void)hipcub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, count_iter, trans_iter, out_iter, output_size_ptr,
input_size, cuda_stream);
(void)hipMalloc(&d_temp_storage, temp_storage_bytes);
(void)hipcub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, count_iter, trans_iter, out_iter,
output_size_ptr, input_size, cuda_stream);
if (input_shape.size() > 1) {
TensorShape shape;
shape.rank = input_shape.size();
for (size_t i = 0; i < input_shape.size(); i++) {
shape.data[i] = input_shape[i];
}
// Transform output index (1-D base) to N-D base in place.
// e.g., [0, 2, 3] -> [(0, 0), (1, 0), (1, 1)] when shape is (2, 2)
hipLaunchKernelGGL(( NonZeroKernel), dim3(CUDA_BLOCKS(device_id, input_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
output_ptr, output_size_ptr, shape);
}
// Since hipGetLastError can return the last error from a runtime call,
// we catch the error in Launch function.
(void)hipFree(d_temp_storage);
}
template CUDA_LIB_EXPORT void NonZero<bool, int64_t>(const bool *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint8_t, int64_t>(const uint8_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint16_t, int64_t>(const uint16_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint32_t, int64_t>(const uint32_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint64_t, int64_t>(const uint64_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int8_t, int64_t>(const int8_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int16_t, int64_t>(const int16_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int32_t, int64_t>(const int32_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int64_t, int64_t>(const int64_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<half, int64_t>(const half *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<float, int64_t>(const float *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<double, int64_t>(const double *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
hipStream_t cuda_stream);
| e2dff4e88c11b7033086fd1ec895fe42d3bf9f4d.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/non_zero_impl.cuh"
#include <cub/cub.cuh>
constexpr size_t kNonZeroMaxDim = 10;
struct TensorShape {
size_t data[kNonZeroMaxDim];
size_t rank;
};
template <typename DataType>
struct IsZero {
__host__ __device__ __forceinline__ size_t operator()(const DataType &x) const { return x == DataType(0) ? 0 : 1; }
};
// Inspired by cub library
template <typename IndexType>
class NonZeroOutputIterator {
public:
// Required iterator traits
typedef NonZeroOutputIterator self_type;
typedef std::ptrdiff_t difference_type;
typedef void value_type;
typedef void *pointer;
typedef IndexType &reference;
#if (THRUST_VERSION >= 100700)
// Use Thrust's iterator categories so we can use these iterators in Thrust 1.7 (or newer) methods
typedef
typename thrust::detail::iterator_facade_category<thrust::any_system_tag, thrust::random_access_traversal_tag,
value_type,
reference>::type iterator_category; ///< The iterator category
#else
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
#endif // THRUST_VERSION
NonZeroOutputIterator(IndexType *ptr, size_t rank) : ptr_(ptr), rank_(rank) {}
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const {
// To avoid data conflict in NonZeroKernel.
return *(ptr_ + rank_ * n);
}
private:
IndexType *ptr_;
const size_t rank_;
};
template <typename IndexType>
__global__ void NonZeroKernel(IndexType *output_ptr, const size_t *output_size_ptr, const TensorShape shape) {
for (size_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < *output_size_ptr; tid += blockDim.x * gridDim.x) {
size_t fill_value = output_ptr[tid * shape.rank];
for (size_t i = 0, j = shape.rank, k = (tid + 1) * shape.rank; i < shape.rank; ++i) {
size_t base = shape.data[--j];
output_ptr[--k] = fill_value % base;
fill_value /= base;
}
}
}
template <typename DataType, typename IndexType>
void NonZero(const DataType *input_ptr, IndexType *output_ptr, size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream) {
// Set the index (1-D base) for non-zero elements and place them into output.
// To support in place operation later, we use custom output iterator,
// which is inspired by cub library. And output_size_ptr stores the number of non-zero elements.
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
cub::CountingInputIterator<IndexType> count_iter(0);
cub::TransformInputIterator<size_t, IsZero<DataType>, const DataType *> trans_iter(input_ptr, IsZero<DataType>());
NonZeroOutputIterator<IndexType> out_iter(output_ptr, input_shape.size());
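// The first Flagged call passes a null temp-storage pointer and only computes the required temp_storage_bytes; the second call performs the actual selection.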
(void)cub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, count_iter, trans_iter, out_iter, output_size_ptr,
input_size, cuda_stream);
(void)cudaMalloc(&d_temp_storage, temp_storage_bytes);
(void)cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, count_iter, trans_iter, out_iter,
output_size_ptr, input_size, cuda_stream);
if (input_shape.size() > 1) {
TensorShape shape;
shape.rank = input_shape.size();
for (size_t i = 0; i < input_shape.size(); i++) {
shape.data[i] = input_shape[i];
}
// Transform output index (1-D base) to N-D base in place.
// e.g., [0, 2, 3] -> [(0, 0), (1, 0), (1, 1)] when shape is (2, 2)
NonZeroKernel<<<CUDA_BLOCKS(device_id, input_size), CUDA_THREADS(device_id), 0, cuda_stream>>>(
output_ptr, output_size_ptr, shape);
}
// Since cudaGetLastError can return the last error from a runtime call,
// we catch the error in Launch function.
(void)cudaFree(d_temp_storage);
}
template CUDA_LIB_EXPORT void NonZero<bool, int64_t>(const bool *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint8_t, int64_t>(const uint8_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint16_t, int64_t>(const uint16_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint32_t, int64_t>(const uint32_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<uint64_t, int64_t>(const uint64_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr,
const std::vector<size_t> &input_shape, size_t input_size,
const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int8_t, int64_t>(const int8_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int16_t, int64_t>(const int16_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int32_t, int64_t>(const int32_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<int64_t, int64_t>(const int64_t *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<half, int64_t>(const half *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<float, int64_t>(const float *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void NonZero<double, int64_t>(const double *input_ptr, int64_t *output_ptr,
size_t *output_size_ptr, const std::vector<size_t> &input_shape,
size_t input_size, const uint32_t &device_id,
cudaStream_t cuda_stream);
|
1e92886c6c9a367daa2418ded8fc99ed79ab181f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
using namespace std;
__global__ void max(float* data, float* data_output, int size) {
__shared__ float max[128];
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
if (index >= size) {
return;
}
max[tid] = data[index];
__syncthreads();
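// Thread 0 scans the block's shared array serially and writes the block maximum to the per-block output.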
if (tid == 0) {
for (unsigned int i = 1; i < blockDim.x; i++) {
if (max[tid] < max[i]) {
max[tid] = max[i];
}
}
data_output[blockIdx.x] = max[tid];
}
}
int main() {
int N = 128;
dim3 nThreadsPerBlock(128, 1, 1);
dim3 nBlocks(N / 128, N / 128, 1);
//float vector_intrare[N];
float *vector_intrare, *vector_iesire;
vector_intrare = new float[N];
vector_iesire = new float[N];
// Initialize the vector
for (int i = 0; i < N; i++) {
vector_intrare[i] = rand() % 99 + 1;
}
for (int i = 0; i < N; i++)
{
cout << vector_intrare[i] << " ";
}
cout << endl;
float *vector_intrare_d, *vector_iesire_d;
// a_d = new float[N*M];
// b_d = new float[N*M];
hipMalloc((void**)&vector_intrare_d, N * sizeof(float));
hipMalloc((void**)&vector_iesire_d, N * sizeof(float));
hipMemcpy(vector_intrare_d, vector_intrare, N * sizeof(float), hipMemcpyHostToDevice);
max << <nBlocks, nThreadsPerBlock >> > (vector_intrare_d, vector_iesire_d, N);
hipMemcpy(vector_iesire, vector_iesire_d, N * sizeof(float), hipMemcpyDeviceToHost);
/*for (int i = 0; i < N; i++) {
cout << vector_iesire[N ] << " ";
}
cout << "\n";*/
cout << *vector_iesire;
return 0;
} | 1e92886c6c9a367daa2418ded8fc99ed79ab181f.cu | #include <iostream>
#include <math.h>
using namespace std;
__global__ void max(float* data, float* data_output, int size) {
__shared__ float max[128];
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tid = threadIdx.x;
if (index >= size) {
return;
}
max[tid] = data[index];
__syncthreads();
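// Thread 0 scans the block's shared array serially and writes the block maximum to the per-block output.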
if (tid == 0) {
for (unsigned int i = 1; i < blockDim.x; i++) {
if (max[tid] < max[i]) {
max[tid] = max[i];
}
}
data_output[blockIdx.x] = max[tid];
}
}
int main() {
int N = 128;
dim3 nThreadsPerBlock(128, 1, 1);
dim3 nBlocks(N / 128, N / 128, 1);
//float vector_intrare[N];
float *vector_intrare, *vector_iesire;
vector_intrare = new float[N];
vector_iesire = new float[N];
// Initialize the vector
for (int i = 0; i < N; i++) {
vector_intrare[i] = rand() % 99 + 1;
}
for (int i = 0; i < N; i++)
{
cout << vector_intrare[i] << " ";
}
cout << endl;
float *vector_intrare_d, *vector_iesire_d;
// a_d = new float[N*M];
// b_d = new float[N*M];
cudaMalloc((void**)&vector_intrare_d, N * sizeof(float));
cudaMalloc((void**)&vector_iesire_d, N * sizeof(float));
cudaMemcpy(vector_intrare_d, vector_intrare, N * sizeof(float), cudaMemcpyHostToDevice);
max << <nBlocks, nThreadsPerBlock >> > (vector_intrare_d, vector_iesire_d, N);
cudaMemcpy(vector_iesire, vector_iesire_d, N * sizeof(float), cudaMemcpyDeviceToHost);
/*for (int i = 0; i < N; i++) {
cout << vector_iesire[N ] << " ";
}
cout << "\n";*/
cout << *vector_iesire;
return 0;
} |
d16135dfc494bfdb2c91438e999e2b4d2112510e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void _ApplyMask(
const int nthreads,
const T scale,
const T* x,
const uint8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = x[i] * (T)mask[i] * scale;
}
}
__global__ void _ApplyMask(
const int nthreads,
const float scale,
const half* x,
const uint8_t* mask,
half* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = __float2half(__half2float(x[i]) * (float)mask[i] * scale);
}
}
template <typename T>
__global__ void _Dropout(
const int nthreads,
const uint32_t threshold,
const T scale,
const T* x,
const uint32_t* r,
uint8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = x[i] * T(mask[i] = (r[i] > threshold)) * scale;
}
}
__global__ void _Dropout(
const int nthreads,
const uint32_t threshold,
const float scale,
const half* x,
const uint32_t* r,
uint8_t* mask,
half* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = __float2half(
__half2float(x[i]) * float(mask[i] = (r[i] > threshold)) * scale);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void ApplyMask<float16, CUDAContext>(
const int count,
const float scale,
const float16* x,
const uint8_t* mask,
float16* y,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _ApplyMask), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
count,
scale,
reinterpret_cast<const half*>(x),
mask,
reinterpret_cast<half*>(y));
}
template <>
void Dropout<float16, CUDAContext>(
const int count,
const float ratio,
const float scale,
const float16* x,
uint8_t* mask,
float16* y,
uint32_t* r,
CUDAContext* ctx) {
math::Random(count, r, ctx);
hipLaunchKernelGGL(( _Dropout), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
count,
static_cast<uint32_t>(UINT_MAX * ratio),
scale,
reinterpret_cast<const half*>(x),
r,
mask,
reinterpret_cast<half*>(y));
}
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void ApplyMask<T, CUDAContext>( \
const int count, \
const float scale, \
const T* x, \
const uint8_t* mask, \
T* y, \
CUDAContext* ctx) { \
hipLaunchKernelGGL(( _ApplyMask), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
count, convert::To<T>(scale), x, mask, y); \
} \
template <> \
void Dropout<T, CUDAContext>( \
const int count, \
const float ratio, \
const float scale, \
const T* x, \
uint8_t* mask, \
T* y, \
uint32_t* r, \
CUDAContext* ctx) { \
math::Random(count, r, ctx); \
auto threshold = static_cast<uint32_t>(UINT_MAX * ratio); \
hipLaunchKernelGGL(( _Dropout), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \
count, threshold, convert::To<T>(scale), x, r, mask, y); \
}
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| d16135dfc494bfdb2c91438e999e2b4d2112510e.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T>
__global__ void _ApplyMask(
const int nthreads,
const T scale,
const T* x,
const uint8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = x[i] * (T)mask[i] * scale;
}
}
__global__ void _ApplyMask(
const int nthreads,
const float scale,
const half* x,
const uint8_t* mask,
half* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = __float2half(__half2float(x[i]) * (float)mask[i] * scale);
}
}
template <typename T>
__global__ void _Dropout(
const int nthreads,
const uint32_t threshold,
const T scale,
const T* x,
const uint32_t* r,
uint8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = x[i] * T(mask[i] = (r[i] > threshold)) * scale;
}
}
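// Editorial note: same inverted-dropout scheme as in the HIP copy of this
// file above; mask[i] records survival (r[i] > threshold) and `scale` is
// presumably 1 / (1 - ratio) so the expected activation is preserved.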
__global__ void _Dropout(
const int nthreads,
const uint32_t threshold,
const float scale,
const half* x,
const uint32_t* r,
uint8_t* mask,
half* y) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
y[i] = __float2half(
__half2float(x[i]) * float(mask[i] = (r[i] > threshold)) * scale);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void ApplyMask<float16, CUDAContext>(
const int count,
const float scale,
const float16* x,
const uint8_t* mask,
float16* y,
CUDAContext* ctx) {
_ApplyMask<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
count,
scale,
reinterpret_cast<const half*>(x),
mask,
reinterpret_cast<half*>(y));
}
template <>
void Dropout<float16, CUDAContext>(
const int count,
const float ratio,
const float scale,
const float16* x,
uint8_t* mask,
float16* y,
uint32_t* r,
CUDAContext* ctx) {
math::Random(count, r, ctx);
_Dropout<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
count,
static_cast<uint32_t>(UINT_MAX * ratio),
scale,
reinterpret_cast<const half*>(x),
r,
mask,
reinterpret_cast<half*>(y));
}
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void ApplyMask<T, CUDAContext>( \
const int count, \
const float scale, \
const T* x, \
const uint8_t* mask, \
T* y, \
CUDAContext* ctx) { \
_ApplyMask<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
count, convert::To<T>(scale), x, mask, y); \
} \
template <> \
void Dropout<T, CUDAContext>( \
const int count, \
const float ratio, \
const float scale, \
const T* x, \
uint8_t* mask, \
T* y, \
uint32_t* r, \
CUDAContext* ctx) { \
math::Random(count, r, ctx); \
auto threshold = static_cast<uint32_t>(UINT_MAX * ratio); \
_Dropout<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \
count, threshold, convert::To<T>(scale), x, r, mask, y); \
}
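// Editorial note: as in the HIP copy, each DEFINE_KERNEL_LAUNCHER(T) below
// instantiates the ApplyMask and Dropout launchers for one element type.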
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
04bfbd75e5aababd125db662191db46114bdd7fd.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) {
return dev_ctx.GetComputeCapability() >= 70;
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
if (exhaustive_search && FLAGS_cudnn_deterministic) {
PADDLE_THROW(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
// Tensor Cores, introduced with Volta GPUs, support faster conv ops
// with FP16 in the NHWC data format.
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
// We will only do data format conversion from NHWC to NCHW.
// cudnn will convert NCHW to NHWC automatically on Tensor Core.
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
Tensor transformed_filter_channel(filter->type());
T* output_data = nullptr;
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output.ShareDataWith(*output);
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
} else {
transformed_filter_channel.ShareDataWith(*filter);
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input, &transformed_filter_channel,
&transformed_output, strides,
padding_common, dilations};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(),
groups));
groups = 1;
#endif
args.idesc.set(transformed_input, layout_format);
args.wdesc.set(transformed_filter_channel, layout_format, groups);
args.odesc.set(transformed_output, layout_format);
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d,
&o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, 0, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
#if CUDNN_VERSION_MIN(7, 0, 1)
// When groups > 1, SearchAlgorithm may pick
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, but that algorithm is
// unstable in forward computation, so fall back to
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM manually.
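// (Editorial note: enum value 0 below is CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM.)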
if (ctx.Attr<int>("groups") > 1) {
algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
}
#endif
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(),
algo, workspace_ptr, workspace_size, &beta,
args.odesc.desc(), output_data + i * group_offset_out));
},
workspace_size);
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvGradOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
Tensor transformed_filter_channel(filter->type());
Tensor transformed_filter_grad_channel(filter->type());
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input, output_grad, input_grad and tensor from "
"NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output_grad_channel.ShareDataWith(*output_grad);
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
if (filter_grad) {
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter_grad, &transformed_filter_grad_channel);
}
} else {
transformed_filter_channel.ShareDataWith(*filter);
if (filter_grad) {
transformed_filter_grad_channel.ShareDataWith(*filter_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
// cuDNN only supports padding the same amount on both sides of each
// spatial dimension, so for asymmetric paddings we create a new,
// explicitly padded input tensor.
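// Editorial worked example (values invented for illustration): with
// paddings = {1, 3} on one spatial dimension, padding_common = min(1, 3) = 1
// is handed to cuDNN as symmetric padding, and the asymmetric remainder
// |1 - 3| = 2 is applied explicitly below via input_pad (0 before, 2 after),
// so the effective padding is still 1 before and 3 after.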
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
&transformed_filter_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
ConvArgs args2{&transformed_input,
&transformed_filter_grad_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
auto handle = dev_ctx.cudnn_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n,
&o_c, &o_d, &o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
size_t workspace_size = 0;
int iwo_groups, c_groups;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, layout_tensor);
args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, layout_tensor);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, 0, ctx);
workspace_size =
::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = transformed_filter_grad_channel.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, layout_tensor);
args2.wdesc.set(transformed_filter_grad_channel, layout_tensor,
iwo_groups);
args2.odesc.set(transformed_output_grad_channel, layout_tensor);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, 1, ctx);
workspace_size = ::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
if (input_grad) {
// Because beta is zero, it is unnecessary to reset input_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out,
args1.cdesc.desc(), data_algo, cudnn_workspace_ptr,
workspace_size, &beta, args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out,
args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr,
workspace_size, &beta, args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
if (compute_format == DataLayout::kNHWC) {
TransToChannelFirst<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_filter_grad_channel, filter_grad);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
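// Editorial note (added commentary): conv(I, W) is bilinear in I and W, so its
// directional derivative along a perturbation (ddI, ddW) is
// conv(ddI, W) + conv(I, ddW), which is exactly ddO. The extra gradient terms
// come from differentiating the first-order backward maps: dI =
// conv_bp_data(W, dO) is linear in W, so pairing it with ddI contributes
// conv_bp_filter(ddI, dO) to dW; symmetrically, dW = conv_bp_filter(I, dO) is
// linear in I, so pairing it with ddW contributes conv_bp_data(ddW, dO) to dI.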
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX, W,
&transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides,
padding_common, dilations};
ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides,
padding_common, dilations};
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
workspace_size = ::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
workspace_size = ::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
workspace_size =
::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(),
fwd_algo1, workspace_ptr, workspace_size, &beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (ddW) {
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(),
x + i * group_offset_in, args2.wdesc.desc(),
ddw + i * group_offset_filter, args2.cdesc.desc(),
fwd_algo2, workspace_ptr, workspace_size, &alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(),
ddx + i * group_offset_in, args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
}
if (dX && ddW) {
ddw = ddW->data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr,
workspace_size, &beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
| 04bfbd75e5aababd125db662191db46114bdd7fd.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the spopecific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) {
return dev_ctx.GetComputeCapability() >= 70;
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
if (exhaustive_search && FLAGS_cudnn_deterministic) {
PADDLE_THROW(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
// Tensor Cores, introduced with Volta GPUs, support faster conv ops
// with FP16 in the NHWC data format.
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
// We will only do data format conversion from NHWC to NCHW.
// cudnn will convert NCHW to NHWC automatically on Tensor Core.
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
Tensor transformed_filter_channel(filter->type());
T* output_data = nullptr;
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output.ShareDataWith(*output);
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
} else {
transformed_filter_channel.ShareDataWith(*filter);
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input, &transformed_filter_channel,
&transformed_output, strides,
padding_common, dilations};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(),
groups));
groups = 1;
#endif
args.idesc.set(transformed_input, layout_format);
args.wdesc.set(transformed_filter_channel, layout_format, groups);
args.odesc.set(transformed_output, layout_format);
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d,
&o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, 0, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
#if CUDNN_VERSION_MIN(7, 0, 1)
// When groups > 1, SearchAlgorithm may pick
// CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, but that algorithm is
// unstable in forward computation, so fall back to
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM manually.
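// (Editorial note: enum value 0 below is CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM.)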
if (ctx.Attr<int>("groups") > 1) {
algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
}
#endif
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(),
algo, workspace_ptr, workspace_size, &beta,
args.odesc.desc(), output_data + i * group_offset_out));
},
workspace_size);
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
VLOG(3) << "Compute ConvGradOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
Tensor transformed_filter_channel(filter->type());
Tensor transformed_filter_grad_channel(filter->type());
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input, output_grad, input_grad and tensor from "
"NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output_grad_channel.ShareDataWith(*output_grad);
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
if (filter_grad) {
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter_grad, &transformed_filter_grad_channel);
}
} else {
transformed_filter_channel.ShareDataWith(*filter);
if (filter_grad) {
transformed_filter_grad_channel.ShareDataWith(*filter_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
// cuDNN only supports padding the same amount on both sides of each
// spatial dimension, so for asymmetric paddings we create a new,
// explicitly padded input tensor.
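// Editorial note: see the worked example in the HIP copy of this file above;
// cuDNN receives the symmetric part min(p_before, p_after) per dimension and
// the asymmetric remainder is applied here through input_pad.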
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
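    // padding_common keeps the symmetric part of the padding that can be passed
    // straight to cuDNN; any asymmetric remainder is applied by explicitly
    // padding the input below (input_pad stores the per-side extra amounts).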
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
&transformed_filter_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
ConvArgs args2{&transformed_input,
&transformed_filter_grad_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations};
auto handle = dev_ctx.cudnn_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n,
&o_c, &o_d, &o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
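    // Offsets used to step through one group's slice of the input, output-grad
    // and filter buffers when the per-group loops below run more than once.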
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
size_t workspace_size = 0;
    int iwo_groups = groups;  // defaults used when the per-group loop runs (cuDNN < 7.0.1)
    int c_groups = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
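    // From cuDNN 7.0.1 on, grouped convolution is expressed through the
    // convolution descriptor (c_groups), so the explicit per-group loops below
    // execute only a single iteration; older cuDNN versions loop over the groups.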
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, layout_tensor);
args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, layout_tensor);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, 0, ctx);
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = transformed_filter_grad_channel.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, layout_tensor);
args2.wdesc.set(transformed_filter_grad_channel, layout_tensor,
iwo_groups);
args2.odesc.set(transformed_output_grad_channel, layout_tensor);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, 1, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
if (input_grad) {
// Because beta is zero, it is unnecessary to reset input_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out,
args1.cdesc.desc(), data_algo, cudnn_workspace_ptr,
workspace_size, &beta, args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
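      // The gradient was computed on the explicitly padded input, so slice the
      // extra padding back off before handing the result to the caller.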
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out,
args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr,
workspace_size, &beta, args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
if (compute_format == DataLayout::kNHWC) {
TransToChannelFirst<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_filter_grad_channel, filter_grad);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
if (exhaustive_search && deterministic) {
PADDLE_THROW(
"Can't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time.");
}
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX, W,
&transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides,
padding_common, dilations};
ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides,
padding_common, dilations};
ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides,
padding_common, dilations};
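    // args1/args2 describe the two forward convolutions whose results sum into
    // ddO; args3 is the backward-filter pass producing dW and args4 the
    // backward-data pass producing dX (see the formulas in the comment above).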
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
workspace_size = std::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
workspace_size =
std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(),
fwd_algo1, workspace_ptr, workspace_size, &beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (ddW) {
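      // Note: alpha is passed where cuDNN expects beta below, so this forward
      // pass accumulates conv(X, ddW) on top of whatever ddO already holds
      // (the conv(ddX, W) term when ddX is given).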
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(),
x + i * group_offset_in, args2.wdesc.desc(),
ddw + i * group_offset_filter, args2.cdesc.desc(),
fwd_algo2, workspace_ptr, workspace_size, &alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(),
ddx + i * group_offset_in, args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
}
if (dX && ddW) {
ddw = ddW->data<T>();
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr,
workspace_size, &beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
|
5087197f42982065c2a950739fbb4694380cb6bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime_api.h>
#include <sys/time.h>
#define STREAMS_NUM 8
__global__ void plus(float *a, float *b, float *c, int n, int offset) {
int i = blockIdx.x*blockDim.x + threadIdx.x + offset;
    if (i < offset + n)   // n is the per-stream chunk size; skip threads past this chunk
        c[i] = a[i] + b[i];
}
int main(void){
int n = 1024*1024;
int size = n*sizeof(float);
struct timeval start, end;
float *a, *b;
float *c;
hipHostMalloc( (void**) &a, size ,hipHostMallocDefault );
hipHostMalloc( (void**) &b, size ,hipHostMallocDefault );
hipHostMalloc( (void**) &c, size ,hipHostMallocDefault );
float *a_d,*b_d,*c_d;
for(int i=0; i < n; i++) {
a[i] = 20.0;
b[i] = 10.0;
}
hipMalloc((void **)&a_d,size);
hipMalloc((void **)&b_d,size);
hipMalloc((void **)&c_d,size);
const int StreamSize = n / STREAMS_NUM;
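    // Split the vectors into STREAMS_NUM equal chunks; each chunk gets its own
    // stream so that its host<->device copies can overlap with other chunks' work.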
hipStream_t Stream[STREAMS_NUM];
for (int i = 0; i < STREAMS_NUM; i++)
hipStreamCreate(&Stream[i]);
dim3 block(1024);
    dim3 grid((StreamSize - 1)/1024 + 1);   // one launch covers a single stream's chunk
gettimeofday( &start, NULL );
for ( int i = 0; i < STREAMS_NUM; i++) {
int Offset = i * StreamSize;
hipMemcpyAsync(&a_d[Offset], &a[Offset], StreamSize * sizeof(float), hipMemcpyHostToDevice, Stream[i]);
hipMemcpyAsync(&b_d[Offset], &b[Offset], StreamSize * sizeof(float), hipMemcpyHostToDevice, Stream[i]);
hipMemcpyAsync(&c_d[Offset], &c[Offset], StreamSize * sizeof(float), hipMemcpyHostToDevice, Stream[i]);
hipLaunchKernelGGL(( plus), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, StreamSize, Offset);
hipMemcpyAsync(&a[Offset], &a_d[Offset], StreamSize * sizeof(float), hipMemcpyDeviceToHost, Stream[i]);
hipMemcpyAsync(&b[Offset], &b_d[Offset], StreamSize * sizeof(float), hipMemcpyDeviceToHost, Stream[i]);
hipMemcpyAsync(&c[Offset], &c_d[Offset], StreamSize * sizeof(float), hipMemcpyDeviceToHost, Stream[i]);
}
    hipDeviceSynchronize();   // wait for all streams to finish before stopping the timer
    gettimeofday(&end,NULL);
    int timeuseGPU = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    std::cout<<"total time used in GPU-Stream is "<<timeuseGPU<<" us "<<std::endl;
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
hipHostFree(a);
hipHostFree(b);
hipHostFree(c);
} | 5087197f42982065c2a950739fbb4694380cb6bb.cu | #include <iostream>
#include <cuda_profiler_api.h>
#include <sys/time.h>
#define STREAMS_NUM 8
__global__ void plus(float *a, float *b, float *c, int n, int offset) {
int i = blockIdx.x*blockDim.x + threadIdx.x + offset;
    if (i < offset + n)   // n is the per-stream chunk size; skip threads past this chunk
        c[i] = a[i] + b[i];
}
int main(void){
int n = 1024*1024;
int size = n*sizeof(float);
struct timeval start, end;
float *a, *b;
float *c;
cudaHostAlloc( (void**) &a, size ,cudaHostAllocDefault );
cudaHostAlloc( (void**) &b, size ,cudaHostAllocDefault );
cudaHostAlloc( (void**) &c, size ,cudaHostAllocDefault );
float *a_d,*b_d,*c_d;
for(int i=0; i < n; i++) {
a[i] = 20.0;
b[i] = 10.0;
}
cudaMalloc((void **)&a_d,size);
cudaMalloc((void **)&b_d,size);
cudaMalloc((void **)&c_d,size);
const int StreamSize = n / STREAMS_NUM;
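    // Split the vectors into STREAMS_NUM equal chunks; each chunk gets its own
    // stream so that its host<->device copies can overlap with other chunks' work.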
cudaStream_t Stream[STREAMS_NUM];
for (int i = 0; i < STREAMS_NUM; i++)
cudaStreamCreate(&Stream[i]);
dim3 block(1024);
    dim3 grid((StreamSize - 1)/1024 + 1);   // one launch covers a single stream's chunk
gettimeofday( &start, NULL );
for ( int i = 0; i < STREAMS_NUM; i++) {
int Offset = i * StreamSize;
cudaMemcpyAsync(&a_d[Offset], &a[Offset], StreamSize * sizeof(float), cudaMemcpyHostToDevice, Stream[i]);
cudaMemcpyAsync(&b_d[Offset], &b[Offset], StreamSize * sizeof(float), cudaMemcpyHostToDevice, Stream[i]);
cudaMemcpyAsync(&c_d[Offset], &c[Offset], StreamSize * sizeof(float), cudaMemcpyHostToDevice, Stream[i]);
plus<<<grid, block>>>(a_d, b_d, c_d, StreamSize, Offset);
cudaMemcpyAsync(&a[Offset], &a_d[Offset], StreamSize * sizeof(float), cudaMemcpyDeviceToHost, Stream[i]);
cudaMemcpyAsync(&b[Offset], &b_d[Offset], StreamSize * sizeof(float), cudaMemcpyDeviceToHost, Stream[i]);
cudaMemcpyAsync(&c[Offset], &c_d[Offset], StreamSize * sizeof(float), cudaMemcpyDeviceToHost, Stream[i]);
}
    cudaDeviceSynchronize();   // wait for all streams to finish before stopping the timer
    gettimeofday(&end,NULL);
    int timeuseGPU = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    std::cout<<"total time used in GPU-Stream is "<<timeuseGPU<<" us "<<std::endl;
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
} |
13feb4ccb266fc79c2452ac8c60436e145e82278.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include "mkRay.h"
#include <time.h>
using namespace std;
//MK: FB size
int nx = 1200;
int ny = 600;
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
//MK: #val returns the whole of val as a string (reference 3)
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
//MK: Used to pinpoint where an error occurred
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
cerr << "MK: CUDA ERROR = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
//MK: (Code 2-1) Determine whether the ray hits the sphere
__device__ bool hitSphere(const vec3 ¢er, float radius, const ray &r){
vec3 oc = r.origin() - center;
float a = dot(r.direction(), r.direction());
float b = 2.0f * dot(oc, r.direction());
float c = dot(oc, oc) - radius * radius;
float discriminant = b*b - 4.0f*a*c;
return (discriminant >= 0);
}
//MK: (Code 2-2) Choose the color depending on whether the sphere was hit
__device__ vec3 color(const ray &r){
vec3 ret = vec3(1, 0, 0);
if(hitSphere(vec3(0, 0, -1), 0.5, r)){
return ret;
}
vec3 unitDirection = unitVector(r.direction());
    //MK: (Code 2-3) Important - note on single vs. double precision arithmetic
float t = 0.5f * (unitDirection.y() + 1.0f);
ret = (1.0 - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return ret;
}
__global__ void mkRender(vec3 *fb, int max_x, int max_y, vec3 lowerLeftCorner, vec3 horizontal, vec3 vertical, vec3 origin) {
    //MK: Use the thread and block indices to compute the pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
    //MK: Skip the computation if the computed pixel position lies outside the FB
if((i >= max_x) || (j >= max_y)){
return;
}
    //MK: Compute the FB pixel value
int pixel_index = j*max_x + i;
float u = float(i)/float(max_x);
float v = float(j)/float(max_y);
ray r(origin, lowerLeftCorner + u*horizontal + v*vertical);
fb[pixel_index] = color(r);
}
int main() {
    //MK: Thread block size
int tx = 8;
int ty = 8;
cout << "MK: Rendering a " << nx << "x" << ny << " Image ";
cout << "MK: in " << tx << "x" << ty << " Thread Blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = 3*num_pixels*sizeof(float);
    //MK: Allocate the FB (hipMallocManaged enables Unified Memory)
    //MK: Data is copied between CPU and GPU on demand
vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
clock_t start, stop;
start = clock();
    //MK: Choose the thread block and grid sizes for the GPU (CUDA) computation
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
    //MK: Launch the CUDA kernel
hipLaunchKernelGGL(( mkRender), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny,
vec3(-2.0, -1.0, -1.0),
vec3(4.0, 0.0, 0.0),
vec3(0.0, 2.0, 0.0),
vec3(0.0, 0.0, 0.0));
checkCudaErrors(hipGetLastError());
    //MK: Wait for the CUDA computation to finish
checkCudaErrors(hipDeviceSynchronize());
    //MK: Measure the elapsed time between the recorded start and end points
    //MK: Write the result to a file, the same as the CPU code
string fileName = "Ch4_gpu.ppm";
ofstream writeFile(fileName.data());
if(writeFile.is_open()){
writeFile.flush();
writeFile << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99 * fb[pixel_index].r());
int ig = int(255.99 * fb[pixel_index].g());
int ib = int(255.99 * fb[pixel_index].b());
writeFile << ir << " " << ig << " " << ib << "\n";
}
}
writeFile.close();
}
    //MK: Free the memory
checkCudaErrors(hipFree(fb));
    //MK: Compute the elapsed time from the recorded start and stop times
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
cout << "MK: GPU (CUDA) Took " << timer_seconds << " Seconds.\n";
return 0;
}
| 13feb4ccb266fc79c2452ac8c60436e145e82278.cu | #include <fstream>
#include "mkRay.h"
#include <time.h>
using namespace std;
//MK: FB size
int nx = 1200;
int ny = 600;
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
//MK: #val returns the whole of val as a string (reference 3)
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
//MK: Used to pinpoint where an error occurred
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
cerr << "MK: CUDA ERROR = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
//MK: (Code 2-1) Determine whether the ray hits the sphere
__device__ bool hitSphere(const vec3 ¢er, float radius, const ray &r){
vec3 oc = r.origin() - center;
float a = dot(r.direction(), r.direction());
float b = 2.0f * dot(oc, r.direction());
float c = dot(oc, oc) - radius * radius;
float discriminant = b*b - 4.0f*a*c;
return (discriminant >= 0);
}
//MK: (Code 2-2) Choose the color depending on whether the sphere was hit
__device__ vec3 color(const ray &r){
vec3 ret = vec3(1, 0, 0);
if(hitSphere(vec3(0, 0, -1), 0.5, r)){
return ret;
}
vec3 unitDirection = unitVector(r.direction());
    //MK: (Code 2-3) Important - note on single vs. double precision arithmetic
float t = 0.5f * (unitDirection.y() + 1.0f);
ret = (1.0 - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return ret;
}
__global__ void mkRender(vec3 *fb, int max_x, int max_y, vec3 lowerLeftCorner, vec3 horizontal, vec3 vertical, vec3 origin) {
    //MK: Use the thread and block indices to compute the pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
    //MK: Skip the computation if the computed pixel position lies outside the FB
if((i >= max_x) || (j >= max_y)){
return;
}
    //MK: Compute the FB pixel value
int pixel_index = j*max_x + i;
float u = float(i)/float(max_x);
float v = float(j)/float(max_y);
ray r(origin, lowerLeftCorner + u*horizontal + v*vertical);
fb[pixel_index] = color(r);
}
int main() {
    //MK: Thread block size
int tx = 8;
int ty = 8;
cout << "MK: Rendering a " << nx << "x" << ny << " Image ";
cout << "MK: in " << tx << "x" << ty << " Thread Blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = 3*num_pixels*sizeof(float);
    //MK: Allocate the FB (cudaMallocManaged enables Unified Memory)
    //MK: Data is copied between CPU and GPU on demand
vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
clock_t start, stop;
start = clock();
    //MK: Choose the thread block and grid sizes for the GPU (CUDA) computation
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
    //MK: Launch the CUDA kernel
mkRender<<<blocks, threads>>>(fb, nx, ny,
vec3(-2.0, -1.0, -1.0),
vec3(4.0, 0.0, 0.0),
vec3(0.0, 2.0, 0.0),
vec3(0.0, 0.0, 0.0));
checkCudaErrors(cudaGetLastError());
    //MK: Wait for the CUDA computation to finish
checkCudaErrors(cudaDeviceSynchronize());
    //MK: Measure the elapsed time between the recorded start and end points
    //MK: Write the result to a file, the same as the CPU code
string fileName = "Ch4_gpu.ppm";
ofstream writeFile(fileName.data());
if(writeFile.is_open()){
writeFile.flush();
writeFile << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99 * fb[pixel_index].r());
int ig = int(255.99 * fb[pixel_index].g());
int ib = int(255.99 * fb[pixel_index].b());
writeFile << ir << " " << ig << " " << ib << "\n";
}
}
writeFile.close();
}
    //MK: Free the memory
checkCudaErrors(cudaFree(fb));
    //MK: Compute the elapsed time from the recorded start and stop times
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
cout << "MK: GPU (CUDA) Took " << timer_seconds << " Seconds.\n";
return 0;
}
|
32cc297e6510a6370c4336c1ff8b092ab24b0144.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/cuda/execution/kernel_impl/pad_impl.cuh"
template <typename T>
__global__ void Pad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T* output) {
T pad_value_ = static_cast<T>(pad_value);
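  // Grid-stride loop over every element of the padded output: positions outside
  // the original image get pad_value, everything else is copied from the input.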
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / padded_width / padded_height;
const int padded_w = pos % padded_width;
const int padded_h = pos / padded_width % padded_height;
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width) {
output[pos] = pad_value_;
} else {
output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left];
}
}
return;
}
void CallPad(const size_t size, const void* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top, const int pad_left,
float pad_value, void* output, MNN::CUDARuntime *runtime, MNN::DataType data_type)
{
hipStream_t cuda_stream = runtime->stream();
int block_num = runtime->blocks_num(size);
int threads_num = runtime->threads_num();
hipDataType cuda_type = kCudaDtypeMap[data_type];
if(cuda_type == HIP_R_32F)
hipLaunchKernelGGL(( Pad), dim3(block_num), dim3(threads_num), 0, cuda_stream, size, (float*)input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value, (float*)output);
else if(cuda_type == HIP_R_16F)
hipLaunchKernelGGL(( Pad), dim3(block_num), dim3(threads_num), 0, cuda_stream, size, (half*)input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value, (half*)output);
else
MNN_PRINT("current only support fp32 and fp16!!!!\n");
return;
} | 32cc297e6510a6370c4336c1ff8b092ab24b0144.cu | /**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/cuda/execution/kernel_impl/pad_impl.cuh"
template <typename T>
__global__ void Pad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T* output) {
T pad_value_ = static_cast<T>(pad_value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / padded_width / padded_height;
const int padded_w = pos % padded_width;
const int padded_h = pos / padded_width % padded_height;
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width) {
output[pos] = pad_value_;
} else {
output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left];
}
}
return;
}
void CallPad(const size_t size, const void* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top, const int pad_left,
float pad_value, void* output, MNN::CUDARuntime *runtime, MNN::DataType data_type)
{
cudaStream_t cuda_stream = runtime->stream();
int block_num = runtime->blocks_num(size);
int threads_num = runtime->threads_num();
cudaDataType_t cuda_type = kCudaDtypeMap[data_type];
if(cuda_type == CUDA_R_32F)
Pad<<<block_num, threads_num, 0, cuda_stream>>>(size, (float*)input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value, (float*)output);
else if(cuda_type == CUDA_R_16F)
Pad<<<block_num, threads_num, 0, cuda_stream>>>(size, (half*)input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value, (half*)output);
else
MNN_PRINT("current only support fp32 and fp16!!!!\n");
return;
} |
fce8819ee2f3d457cb8b03058f3fb52730da9de8.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <vector>
#include <fstream>
#include <sstream>
#include <string>
#include <assert.h>
#include "rays.hpp"
#include "sdl.hpp"
#define IFDEBUG(...)
using std::vector;
__device__ mat3f rotation3d(float ang, int l, int m, int n) {
mat3f mat;
mat.a0 = l*l*(1 - cos(ang)) + cos(ang);
mat.b0 = m*l*(1 - cos(ang)) - sin(ang) * n;
mat.c0 = n*l*(1 - cos(ang)) + sin(ang) * m;
mat.a1 = l*m*(1 - cos(ang)) + sin(ang) * n;
mat.b1 = m*m*(1 - cos(ang)) + cos(ang);
mat.c1 = n*m*(1 - cos(ang)) - sin(ang) * l;
mat.a2 = l*n*(1 - cos(ang)) - sin(ang) * m;
mat.b2 = m*n*(1 - cos(ang)) + sin(ang) * l;
mat.c2 = n*n*(1 - cos(ang)) + cos(ang);
return mat;
}
__device__ vec3f multvec3f(vec3f v, mat3f m) {
vec3f r;
r.x = v.x * m.a0 + v.y * m.b0 + v.z * m.c0;
r.y = v.x * m.a1 + v.y * m.b1 + v.z * m.c1;
r.z = v.x * m.a2 + v.y * m.b2 + v.z * m.c2;
return r;
}
void check(hipError_t err, const char* context) {
if (err != hipSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< hipGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
void loadObjFile(vector<vec3i> &triangles, vector<vec3f> &vertices, vector<vec3f> &normals) {
std::ifstream ifile("obj/teapot.obj");
std::string nil;
std::string line;
while (std::getline(ifile, line)) {
std::istringstream iss(line);
switch(line[0]) {
case 'v':
vec3f vertex;
iss >> nil >> vertex.x >> vertex.y >> vertex.z;
vertices.push_back(vertex);
break;
case 'f':
vec3i triangle;
iss >> nil >> triangle.x >> triangle.y >> triangle.z;
triangles.push_back(triangle);
break;
case 'n':
vec3f normal;
iss >> nil >> normal.x >> normal.y >> normal.z;
normals.push_back(normal);
break;
default:
break;
}
}
std::cout << "v: " << vertices.size() << std::endl;
std::cout << "n: " << normals.size() << std::endl;
std::cout << "t: " << triangles.size()/3 << std::endl;
}
__global__ void drawRay(vec3f camera, float ang, const vec3f *vertices, const vec3f *normals, int vn, const int *triangles, int tr, double fovx, double fovy, int *buffer) {
int h = blockIdx.y * 8 + threadIdx.y;
int w = blockIdx.x * 8 + threadIdx.x;
// POINT IN IMAGE PLANE
vec3f p;
p.x = w*2.0f/SCREEN_WIDTH - 1.f;
p.y = 1.0f/(tan(fovx/2));
p.z = h*2.0f/SCREEN_HEIGHT - 1.f;
    mat3f camRot = rotation3d(ang, 1, 0, 0); // axis l=1, m=0, n=0 (the x axis)
IFDEBUG(abs(p.x) < 1.0f || h || w);
IFDEBUG(abs(p.z) < 1.0f || h || w);
// A RAY
vec3f ray = multvec3f(p, camRot);
vec3f e1, e2;
vec3f tvec, pvec, qvec;
float mt = 0;
for(int it = 0; it < tr; it++) {
assert(3*it + 2 < tr*3);
int t0 = triangles[3*it + 0];
int t1 = triangles[3*it + 1];
int t2 = triangles[3*it + 2];
IFDEBUG(t0 < vn);
IFDEBUG(t1 < vn);
IFDEBUG(t2 < vn);
// Copy three vertices of a triangle
vec3f v0 = vertices[t0];
vec3f v1 = vertices[t1];
vec3f v2 = vertices[t2];
int intersect = 0;
double u, v, t, det, inv_det;
sub(e1, v1, v0);
sub(e2, v2, v0);
cross(pvec, ray, e2);
det = dot(pvec, e1);
if (det > -EPSILON && det < EPSILON) {
intersect = 0;
} else {
inv_det = 1.0 / det;
sub(tvec, camera, v0);
u = dot(tvec, pvec) * inv_det;
if(u < 0.0 || u > 1.0) {
intersect = 0;
} else {
cross(qvec, tvec, e1);
v = dot(ray, qvec) * inv_det;
if(v < 0.0 || u + v > 1.0)
intersect = 0;
else {
t = dot(e2, qvec) * inv_det;
intersect = t > 0;
}
}
}
if (intersect && t > mt) {
            vec3f n0 = normals[t0];   // renamed from vn to avoid shadowing the vertex-count parameter
            float mul = dot(n0, ray) / 3.0f;
int col = (int)(255.0 * mul) & 0xff;
mt = t;
buffer[w + h*SCREEN_WIDTH] = col | (col << 8) | (col << 16);
}
}
}
| fce8819ee2f3d457cb8b03058f3fb52730da9de8.cu | #include <iostream>
#include <cuda_runtime.h>
#include <vector>
#include <fstream>
#include <sstream>
#include <string>
#include <assert.h>
#include "rays.hpp"
#include "sdl.hpp"
#define IFDEBUG(...)
using std::vector;
__device__ mat3f rotation3d(float ang, int l, int m, int n) {
mat3f mat;
mat.a0 = l*l*(1 - cos(ang)) + cos(ang);
mat.b0 = m*l*(1 - cos(ang)) - sin(ang) * n;
mat.c0 = n*l*(1 - cos(ang)) + sin(ang) * m;
mat.a1 = l*m*(1 - cos(ang)) + sin(ang) * n;
mat.b1 = m*m*(1 - cos(ang)) + cos(ang);
mat.c1 = n*m*(1 - cos(ang)) - sin(ang) * l;
mat.a2 = l*n*(1 - cos(ang)) - sin(ang) * m;
mat.b2 = m*n*(1 - cos(ang)) + sin(ang) * l;
mat.c2 = n*n*(1 - cos(ang)) + cos(ang);
return mat;
}
__device__ vec3f multvec3f(vec3f v, mat3f m) {
vec3f r;
r.x = v.x * m.a0 + v.y * m.b0 + v.z * m.c0;
r.y = v.x * m.a1 + v.y * m.b1 + v.z * m.c1;
r.z = v.x * m.a2 + v.y * m.b2 + v.z * m.c2;
return r;
}
void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
void loadObjFile(vector<vec3i> &triangles, vector<vec3f> &vertices, vector<vec3f> &normals) {
std::ifstream ifile("obj/teapot.obj");
std::string nil;
std::string line;
while (std::getline(ifile, line)) {
std::istringstream iss(line);
switch(line[0]) {
case 'v':
vec3f vertex;
iss >> nil >> vertex.x >> vertex.y >> vertex.z;
vertices.push_back(vertex);
break;
case 'f':
vec3i triangle;
iss >> nil >> triangle.x >> triangle.y >> triangle.z;
triangles.push_back(triangle);
break;
case 'n':
vec3f normal;
iss >> nil >> normal.x >> normal.y >> normal.z;
normals.push_back(normal);
break;
default:
break;
}
}
std::cout << "v: " << vertices.size() << std::endl;
std::cout << "n: " << normals.size() << std::endl;
std::cout << "t: " << triangles.size()/3 << std::endl;
}
__global__ void drawRay(vec3f camera, float ang, const vec3f *vertices, const vec3f *normals, int vn, const int *triangles, int tr, double fovx, double fovy, int *buffer) {
int h = blockIdx.y * 8 + threadIdx.y;
int w = blockIdx.x * 8 + threadIdx.x;
// POINT IN IMAGE PLANE
vec3f p;
p.x = w*2.0f/SCREEN_WIDTH - 1.f;
p.y = 1.0f/(tan(fovx/2));
p.z = h*2.0f/SCREEN_HEIGHT - 1.f;
    mat3f camRot = rotation3d(ang, 1, 0, 0); // axis l=1, m=0, n=0 (the x axis)
IFDEBUG(abs(p.x) < 1.0f || h || w);
IFDEBUG(abs(p.z) < 1.0f || h || w);
// A RAY
vec3f ray = multvec3f(p, camRot);
vec3f e1, e2;
vec3f tvec, pvec, qvec;
float mt = 0;
for(int it = 0; it < tr; it++) {
assert(3*it + 2 < tr*3);
int t0 = triangles[3*it + 0];
int t1 = triangles[3*it + 1];
int t2 = triangles[3*it + 2];
IFDEBUG(t0 < vn);
IFDEBUG(t1 < vn);
IFDEBUG(t2 < vn);
// Copy three vertices of a triangle
vec3f v0 = vertices[t0];
vec3f v1 = vertices[t1];
vec3f v2 = vertices[t2];
int intersect = 0;
double u, v, t, det, inv_det;
sub(e1, v1, v0);
sub(e2, v2, v0);
cross(pvec, ray, e2);
det = dot(pvec, e1);
if (det > -EPSILON && det < EPSILON) {
intersect = 0;
} else {
inv_det = 1.0 / det;
sub(tvec, camera, v0);
u = dot(tvec, pvec) * inv_det;
if(u < 0.0 || u > 1.0) {
intersect = 0;
} else {
cross(qvec, tvec, e1);
v = dot(ray, qvec) * inv_det;
if(v < 0.0 || u + v > 1.0)
intersect = 0;
else {
t = dot(e2, qvec) * inv_det;
intersect = t > 0;
}
}
}
if (intersect && t > mt) {
            vec3f n0 = normals[t0];   // renamed from vn to avoid shadowing the vertex-count parameter
            float mul = dot(n0, ray) / 3.0f;
int col = (int)(255.0 * mul) & 0xff;
mt = t;
buffer[w + h*SCREEN_WIDTH] = col | (col << 8) | (col << 16);
}
}
}
|
ac67898d3455b35f426f4b440dcbc6d6080eb7f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * @file particlefilter_kernel.cu
 * @details This file describes the kernel and device functions for a particle filter resampling task.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#ifndef _PARTICLEFILTER_KERNEL_H_
#define _PARTICLEFILTER_KERNEL_H_
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
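        // Linear scan for the first CDF entry that reaches u[i]; the particle at
        // that index is the one resampled into position i.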
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
#endif
| ac67898d3455b35f426f4b440dcbc6d6080eb7f0.cu | /**
 * @file particlefilter_kernel.cu
 * @details This file describes the kernel and device functions for a particle filter resampling task.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#ifndef _PARTICLEFILTER_KERNEL_H_
#define _PARTICLEFILTER_KERNEL_H_
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
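        // Linear scan for the first CDF entry that reaches u[i]; the particle at
        // that index is the one resampled into position i.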
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
#endif
|
f6abdae9f12117af119af438406bad5b2234946f.hip | // !!! This is a file automatically generated by hipify!!!
#include <type_traits>
#include <cstdio>
#include <cmath>
#include <vector>
#include <cassert>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "gpu.hxx"
#include "fft_kernels.hxx"
#ifdef __BENCHMARK_FBFFT__
#include "cuda/fbfft/FBFFT.h"
#include "cuda/fbfft/FBFFTCommon.cuh"
using namespace facebook::cuda;
#endif
using namespace mgpu;
template<typename real_t>
std::vector<complex_t<real_t> >
fft_simple(const std::vector<complex_t<real_t> >& x,
int offset = -1, int stride = -1) {
if(-1 == offset) {
offset = 0;
stride = 1;
}
int n = (int)x.size();
std::vector<complex_t<real_t> > y(n);
if(2 == n) {
y[0] = x[0] + x[1];
y[1] = x[0] - x[1];
} else {
std::vector<complex_t<real_t> > x0(n / 2), x1(n / 2);
for(int k = 0; k < n / 2; ++k)
x0[k] = x[2 * k + 0], x1[k] = x[2 * k + 1];
std::vector<complex_t<real_t> > y0 =
fft_simple(x0, offset, 2 * stride);
std::vector<complex_t<real_t> > y1 =
fft_simple(x1, offset + stride, 2 * stride);
for(int k = 0; k < n / 2; ++k) {
y[k + 0 ] = y0[k] + y1[k] * W<real_t>(k, n);
y[k + n / 2] = y0[k] - y1[k] * W<real_t>(k, n);
}
}
return y;
}
template<typename real_t>
std::vector<complex_t<real_t> >
fft_real(const std::vector<real_t>& x) {
std::vector<complex_t<real_t> > x_complex(x.size());
for(size_t i = 0; i < x.size(); ++i)
x_complex[i] = complex_t<real_t>(x[i], 0);
return fft_simple(x_complex);
}
struct benchmark_t {
int n;
int size;
int batch;
int num_iterations;
  double elapsed[3]; // [0] is MGPU. [1] is CUFFT. [2] is FBFFT.
double throughput[3]; // In billion points/sec.
double bandwidth[3]; // In GB/sec.
};
benchmark_t benchmark_test(int n, int batch, int num_iterations = 10,
bool print_transform = false) {
typedef float real_t;
benchmark_t benchmark = benchmark_t();
benchmark.n = n;
benchmark.batch = batch;
benchmark.size = n * batch;
benchmark.num_iterations = num_iterations;
std::vector<real_t> x_real(n * batch);
for(int sys = 0; sys < batch; ++sys)
for(int i = 0; i < n; ++i)
x_real[sys * n + i] = sin(i + sys + 6);
// Allocate space for both
real_t* input_global = host_to_device(x_real);
complex_t<real_t>* output_mgpu, *output_cufft;
// MGPU outputs n / 2 complex elements, by packing y[n/2].real into y[0].imag.
// CUFFT outputs n / 2 + 1 elements. We want to allow CUFFT to write complete
// cache lines, so round n / 2 + 1 up to the next cache-line size.
int odist = (n >= 32) ? (~15 & (n / 2 + 1 + 15)) : n / 2 + 1;
hipMalloc((void**)&output_mgpu, sizeof(complex_t<real_t>) * (n / 2) * batch);
hipMemset(output_mgpu, 0, sizeof(complex_t<real_t>) * (n / 2) * batch);
hipMalloc((void**)&output_cufft, sizeof(complex_t<real_t>) * odist * batch);
hipMemset(output_cufft, 0, sizeof(complex_t<real_t>) * odist * batch);
#ifdef __BENCHMARK_FBFFT__
complex_t<real_t>* output_fbfft;
hipMalloc((void**)&output_fbfft, sizeof(complex_t<real_t>) * n * batch);
hipMemset(output_fbfft, 0, sizeof(complex_t<real_t>) * n * batch);
#endif
mgpu::timer_t timer;
// MGPU
{
timer.start();
for(int it = 0; it < num_iterations; ++it)
fft_kernel(n, input_global, output_mgpu, batch);
benchmark.elapsed[0] = timer.stop();
}
// CUFFT
{
hipfftHandle plan;
int dim[] = { n, 0, 0 };
int inembed[] = { 0 };
int onembed[] = { 0 };
hipfftResult result = hipfftPlanMany(&plan, 1, dim, inembed, 1, n,
onembed, 1, odist, HIPFFT_R2C, batch);
timer.start();
for(int it = 0; it < num_iterations; ++it)
result = hipfftExecR2C(plan, input_global, (hipfftComplex*)output_cufft);
benchmark.elapsed[1] = timer.stop();
hipfftDestroy(plan);
}
#ifdef __BENCHMARK_FBFFT__
// FBFFT
{
if(n <= 256) {
// FBFFT only supports n <= 256.
int real_sizes[] = { batch, n };
DeviceTensor<float, 2> real(input_global, real_sizes);
int complex_sizes[] = { batch, n, 2 };
DeviceTensor<float, 3> complex((float*)output_fbfft, complex_sizes);
timer.start();
for(int it = 0; it < num_iterations; ++it)
fbfft::fbfft1D<1>(real, complex);
benchmark.elapsed[2] = timer.stop();
}
}
#endif
// Compute throughputs and bandwidths from elapsed time.
for(int i = 0; i < 3; ++i) {
if(benchmark.elapsed[i]) {
benchmark.throughput[i] = (double)benchmark.size * num_iterations /
benchmark.elapsed[i] / 1.0e9;
int terms = (2 == i) ? 3 : 2;
benchmark.bandwidth[i] = (double)benchmark.size * num_iterations *
terms * sizeof(real_t) / benchmark.elapsed[i] / 1.0e9;
}
}
std::vector<complex_t<real_t> > output_host =
device_to_host(output_mgpu, (n / 2) * batch);
// Print the transform and compare against the CPU-generated reference.
// Nice for debugging.
if(print_transform) {
for(int sys = 0; sys < batch; ++sys) {
std::vector<real_t> x_real2(
x_real.data() + sys * n,
x_real.data() + (sys + 1) * n);
std::vector<complex_t<real_t> > y = fft_real(x_real2);
for(int i = 0; i < n / 2; ++i) {
int index = sys * n / 2 + i;
complex_t<real_t> ref = i ? y[i] :
complex_t<real_t>(y[0].real, y[n / 2].real);
complex_t<real_t> test = output_host[index];
bool error = abs(ref - test) / abs(ref) > 1.0e-4;
printf("%5d | %5d: % 10.5f + i% 10.5f % 10.5f + i% 10.5f %c\n",
sys, i, ref.real, ref.imag, test.real, test.imag,
error ? '*' : ' ');
}
}
}
hipFree(input_global);
hipFree(output_mgpu);
hipFree(output_cufft);
#ifdef __BENCHMARK_FBFFT__
hipFree(output_fbfft);
#endif
return benchmark;
}
int main(int argc, char** argv) {
size_t freeMem, totalMem;
hipMemGetInfo(&freeMem, &totalMem);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("%s\n", prop.name);
printf("%d MB device memory.\n", (int)(totalMem / (1<< 20)));
double bandwidth = 1000.0 * prop.memoryClockRate * prop.memoryBusWidth /
8 * 2 / 1.0e9;
printf("Memory bandwidth: %d GB/s\n", (int)bandwidth);
printf("\nThroughputs reported by\n"
"1. billions of points/s.\n"
"2. GB/s of memory utilization (8 bytes/point).\n\n");
printf("\n n: MGPU CUFFT FBFFT\n");
for(int n = 4; n <= 1024; n *= 2) {
    benchmark_t bench = benchmark_test(n, (64 << 20) / n, 10);
printf("%4d: %6.3fB/s %6.2fGB/s %6.3fB/s %6.2fGB/s %5.2fx %6.3fB/s %6.2fGB/s %5.2fx\n",
bench.n,
bench.throughput[0], bench.bandwidth[0],
bench.throughput[1], bench.bandwidth[1],
bench.throughput[0] / bench.throughput[1],
bench.throughput[2], bench.bandwidth[2],
bench.throughput[0] / bench.throughput[2]);
}
return 0;
}
| f6abdae9f12117af119af438406bad5b2234946f.cu |
#include <type_traits>
#include <cstdio>
#include <cmath>
#include <vector>
#include <cassert>
#include <cuda.h>
#include <cufft.h>
#include "gpu.hxx"
#include "fft_kernels.hxx"
#ifdef __BENCHMARK_FBFFT__
#include "cuda/fbfft/FBFFT.h"
#include "cuda/fbfft/FBFFTCommon.cuh"
using namespace facebook::cuda;
#endif
using namespace mgpu;
template<typename real_t>
std::vector<complex_t<real_t> >
fft_simple(const std::vector<complex_t<real_t> >& x,
int offset = -1, int stride = -1) {
if(-1 == offset) {
offset = 0;
stride = 1;
}
int n = (int)x.size();
std::vector<complex_t<real_t> > y(n);
if(2 == n) {
y[0] = x[0] + x[1];
y[1] = x[0] - x[1];
} else {
std::vector<complex_t<real_t> > x0(n / 2), x1(n / 2);
for(int k = 0; k < n / 2; ++k)
x0[k] = x[2 * k + 0], x1[k] = x[2 * k + 1];
std::vector<complex_t<real_t> > y0 =
fft_simple(x0, offset, 2 * stride);
std::vector<complex_t<real_t> > y1 =
fft_simple(x1, offset + stride, 2 * stride);
for(int k = 0; k < n / 2; ++k) {
y[k + 0 ] = y0[k] + y1[k] * W<real_t>(k, n);
y[k + n / 2] = y0[k] - y1[k] * W<real_t>(k, n);
}
}
return y;
}
template<typename real_t>
std::vector<complex_t<real_t> >
fft_real(const std::vector<real_t>& x) {
std::vector<complex_t<real_t> > x_complex(x.size());
for(size_t i = 0; i < x.size(); ++i)
x_complex[i] = complex_t<real_t>(x[i], 0);
return fft_simple(x_complex);
}
struct benchmark_t {
int n;
int size;
int batch;
int num_iterations;
  double elapsed[3]; // [0] is MGPU. [1] is CUFFT. [2] is FBFFT.
double throughput[3]; // In billion points/sec.
double bandwidth[3]; // In GB/sec.
};
benchmark_t benchmark_test(int n, int batch, int num_iterations = 10,
bool print_transform = false) {
typedef float real_t;
benchmark_t benchmark = benchmark_t();
benchmark.n = n;
benchmark.batch = batch;
benchmark.size = n * batch;
benchmark.num_iterations = num_iterations;
std::vector<real_t> x_real(n * batch);
for(int sys = 0; sys < batch; ++sys)
for(int i = 0; i < n; ++i)
x_real[sys * n + i] = sin(i + sys + 6);
// Allocate space for both
real_t* input_global = host_to_device(x_real);
complex_t<real_t>* output_mgpu, *output_cufft;
// MGPU outputs n / 2 complex elements, by packing y[n/2].real into y[0].imag.
// CUFFT outputs n / 2 + 1 elements. We want to allow CUFFT to write complete
// cache lines, so round n / 2 + 1 up to the next cache-line size.
int odist = (n >= 32) ? (~15 & (n / 2 + 1 + 15)) : n / 2 + 1;
cudaMalloc((void**)&output_mgpu, sizeof(complex_t<real_t>) * (n / 2) * batch);
cudaMemset(output_mgpu, 0, sizeof(complex_t<real_t>) * (n / 2) * batch);
cudaMalloc((void**)&output_cufft, sizeof(complex_t<real_t>) * odist * batch);
cudaMemset(output_cufft, 0, sizeof(complex_t<real_t>) * odist * batch);
#ifdef __BENCHMARK_FBFFT__
complex_t<real_t>* output_fbfft;
cudaMalloc((void**)&output_fbfft, sizeof(complex_t<real_t>) * n * batch);
cudaMemset(output_fbfft, 0, sizeof(complex_t<real_t>) * n * batch);
#endif
mgpu::timer_t timer;
// MGPU
{
timer.start();
for(int it = 0; it < num_iterations; ++it)
fft_kernel(n, input_global, output_mgpu, batch);
benchmark.elapsed[0] = timer.stop();
}
// CUFFT
{
cufftHandle plan;
int dim[] = { n, 0, 0 };
int inembed[] = { 0 };
int onembed[] = { 0 };
cufftResult result = cufftPlanMany(&plan, 1, dim, inembed, 1, n,
onembed, 1, odist, CUFFT_R2C, batch);
timer.start();
for(int it = 0; it < num_iterations; ++it)
result = cufftExecR2C(plan, input_global, (cufftComplex*)output_cufft);
benchmark.elapsed[1] = timer.stop();
cufftDestroy(plan);
}
#ifdef __BENCHMARK_FBFFT__
// FBFFT
{
if(n <= 256) {
// FBFFT only supports n <= 256.
int real_sizes[] = { batch, n };
DeviceTensor<float, 2> real(input_global, real_sizes);
int complex_sizes[] = { batch, n, 2 };
DeviceTensor<float, 3> complex((float*)output_fbfft, complex_sizes);
timer.start();
for(int it = 0; it < num_iterations; ++it)
fbfft::fbfft1D<1>(real, complex);
benchmark.elapsed[2] = timer.stop();
}
}
#endif
// Compute throughputs and bandwidths from elapsed time.
for(int i = 0; i < 3; ++i) {
if(benchmark.elapsed[i]) {
benchmark.throughput[i] = (double)benchmark.size * num_iterations /
benchmark.elapsed[i] / 1.0e9;
int terms = (2 == i) ? 3 : 2;
benchmark.bandwidth[i] = (double)benchmark.size * num_iterations *
terms * sizeof(real_t) / benchmark.elapsed[i] / 1.0e9;
}
}
std::vector<complex_t<real_t> > output_host =
device_to_host(output_mgpu, (n / 2) * batch);
// Print the transform and compare against the CPU-generated reference.
// Nice for debugging.
if(print_transform) {
for(int sys = 0; sys < batch; ++sys) {
std::vector<real_t> x_real2(
x_real.data() + sys * n,
x_real.data() + (sys + 1) * n);
std::vector<complex_t<real_t> > y = fft_real(x_real2);
for(int i = 0; i < n / 2; ++i) {
int index = sys * n / 2 + i;
complex_t<real_t> ref = i ? y[i] :
complex_t<real_t>(y[0].real, y[n / 2].real);
complex_t<real_t> test = output_host[index];
bool error = abs(ref - test) / abs(ref) > 1.0e-4;
printf("%5d | %5d: % 10.5f + i% 10.5f % 10.5f + i% 10.5f %c\n",
sys, i, ref.real, ref.imag, test.real, test.imag,
error ? '*' : ' ');
}
}
}
cudaFree(input_global);
cudaFree(output_mgpu);
cudaFree(output_cufft);
#ifdef __BENCHMARK_FBFFT__
cudaFree(output_fbfft);
#endif
return benchmark;
}
int main(int argc, char** argv) {
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("%s\n", prop.name);
printf("%d MB device memory.\n", (int)(totalMem / (1<< 20)));
double bandwidth = 1000.0 * prop.memoryClockRate * prop.memoryBusWidth /
8 * 2 / 1.0e9;
printf("Memory bandwidth: %d GB/s\n", (int)bandwidth);
printf("\nThroughputs reported by\n"
"1. billions of points/s.\n"
"2. GB/s of memory utilization (8 bytes/point).\n\n");
printf("\n n: MGPU CUFFT FBFFT\n");
for(int n = 4; n <= 1024; n *= 2) {
benchmark_t bench = benchmark_test(n, (64<< 20) / n, 10);
printf("%4d: %6.3fB/s %6.2fGB/s %6.3fB/s %6.2fGB/s %5.2fx %6.3fB/s %6.2fGB/s %5.2fx\n",
bench.n,
bench.throughput[0], bench.bandwidth[0],
bench.throughput[1], bench.bandwidth[1],
bench.throughput[0] / bench.throughput[1],
bench.throughput[2], bench.bandwidth[2],
bench.throughput[0] / bench.throughput[2]);
}
return 0;
}
|
b3ddb88eb9bd78e51928254658a7a3033ed27f68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_argmax.h"
#include "hip/hip_fp16.h"
#include <cfloat>
namespace anakin {
namespace saber {
template <typename Dtype, unsigned int blockSize>
__global__ void top1(const Dtype* in_data,
const int height,
const int width,
bool out_max_val,
Dtype* out_data) {
if (blockIdx.x > height) {
return;
}
__shared__ Dtype share_data[CUDA_NUM_THREADS];
__shared__ Dtype share_index[CUDA_NUM_THREADS];
int offset = blockIdx.x * width;
const Dtype* tmp_in_data = in_data + offset;
Dtype minest = -1e32;
int index = threadIdx.x;
if (index < width) {
Dtype result = tmp_in_data[index];
Dtype idx = index;
for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) {
if (result < tmp_in_data[tid]) {
result = tmp_in_data[tid];
idx = tid;
}
}
share_data[index] = result;
share_index[index] = idx;
} else {
share_data[index] = minest;
share_index[index] = -1;
}
__syncthreads();
if (blockSize >= 512) {
if (index < 256) {
int index2 = index + 256;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (index < 128) {
int index2 = index + 128;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (index < 64) {
int index2 = index + 64;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (index < 32) {
volatile Dtype *vmax = share_data;
volatile Dtype *vindex = share_index;
if (blockSize >= 64) {
int index2 = index + 32;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 32) {
int index2 = index + 16;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 16) {
int index2 = index + 8;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 8) {
int index2 = index + 4;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 4) {
int index2 = index + 2;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 2) {
int index2 = index + 1;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
}
__syncthreads();
if (index == 0) {
if (!out_max_val) {
out_data[blockIdx.x] = share_index[0];
} else {
out_data[2 * blockIdx.x] = share_index[0];
out_data[2 * blockIdx.x + 1] = share_data[0];
}
}
}
template <typename Dtype, unsigned int blockSize>
__global__ void block_top1(const Dtype* in_data,
const int height,
const int width,
Dtype* out_data,
Dtype* out_index) {
__shared__ Dtype share_data[CUDA_NUM_THREADS];
__shared__ Dtype share_index[CUDA_NUM_THREADS];
int offset = blockIdx.y * width + blockIdx.x * CUDA_NUM_THREADS;
const Dtype* tmp_in_data = in_data + offset;
Dtype minest = -1e32;
int index = threadIdx.x;
if (index + blockIdx.x * CUDA_NUM_THREADS < width) {
share_data[index] = tmp_in_data[index];
share_index[index] = threadIdx.x;
} else {
share_data[index] = minest;
share_index[index] = -1;
}
__syncthreads();
if (blockSize >= 512) {
if (index < 256) {
int index2 = index + 256;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (index < 128) {
int index2 = index + 128;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (index < 64) {
int index2 = index + 64;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (index < 32) {
volatile Dtype *vmax = share_data;
volatile Dtype *vindex = share_index;
if (blockSize >= 64) {
            int index2 = index + 32;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 32) {
int index2 = index + 16;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 16) {
int index2 = index + 8;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 8) {
int index2 = index + 4;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 4) {
int index2 = index + 2;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 2) {
int index2 = index + 1;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
}
__syncthreads();
if (index == 0) {
int offset = blockIdx.y * gridDim.x + blockIdx.x;
out_data[offset] = share_data[0];
out_index[offset] = share_index[0];
}
}
template <typename Dtype, unsigned int blockSize>
__global__ void top1(const Dtype* in_data,
const Dtype* in_index,
const int height,
const int width,
bool out_max_val,
Dtype* out_data) {
__shared__ Dtype share_data[blockSize];
__shared__ Dtype share_index[blockSize];
int offset = blockIdx.x * width;
const Dtype* tmp_in_data = in_data + offset;
const Dtype* tmp_in_index = in_index + offset;
Dtype minest = -1e10;
int index = threadIdx.x;
if (index < width) {
Dtype result = tmp_in_data[index];
Dtype idx = index;
for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) {
if (result < tmp_in_data[tid]) {
result = tmp_in_data[tid];
idx = tid;
}
}
share_data[index] = result;
share_index[index] = idx;
} else {
share_data[index] = minest;
share_index[index] = -1;
}
__syncthreads();
if (blockSize >= 512) {
if (index < 256) {
int index2 = index + 256;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (index < 128) {
int index2 = index + 128;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (index < 64) {
int index2 = index + 64;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (index < 32) {
volatile Dtype *vmax = share_data;
volatile Dtype *vindex = share_index;
if (blockSize >= 64) {
            int index2 = index + 32;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 32) {
int index2 = index + 16;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 16) {
int index2 = index + 8;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 8) {
int index2 = index + 4;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 4) {
int index2 = index + 2;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 2) {
int index2 = index + 1;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
}
__syncthreads();
if (index == 0) {
int block_id = share_index[0];
if (!out_max_val) {
out_data[blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id];
} else {
out_data[2 * blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id];
out_data[2 * blockIdx.x + 1] = share_data[0];
}
}
}
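// Argmax along the channel axis: one thread handles one (batch, inner-dim)
// position, scans all channels sequentially, and writes either the index of
// the maximum or the maximum value itself depending on out_max_val.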
template <typename Dtype>
__global__ void top1_channel(const Dtype* in_data,
const int num,
const int channel,
const int inner_dim,
bool out_max_val,
Dtype* out_data) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id > num * inner_dim) {
return;
}
int num_id = thread_id / inner_dim;
int inner_id = thread_id % inner_dim;
//
const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id;
Dtype max_data = tmp_in_data[0];
Dtype max_id = 0;
for (int i = 1; i < channel; i++) {
Dtype data = tmp_in_data[i*inner_dim];
if (max_data < data) {
max_data = data;
max_id = i;
}
}
out_data[thread_id] = out_max_val ? max_data : max_id;
}
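// Sift-down for an array-backed min-heap, moving the value tree and the index
// tree in lockstep. The top-k kernels keep the k largest elements seen so far
// in such a heap: the root is the smallest of the current top-k, so a new
// candidate only needs to beat the root to be inserted.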
template <typename Dtype>
__device__ void adjust_small_heap_with_index_device(Dtype* tree, Dtype *index_tree,int index,int length){
while (2 * index + 1 < length) {
int child_index = 2 * index + 1;
if (child_index + 1 < length && tree[child_index + 1] < tree[child_index]) {
child_index++;
}
if (tree[index] > tree[child_index]) {
Dtype t = tree[index];
tree[index] = tree[child_index];
tree[child_index] = t;
int t_index = index_tree[index];
index_tree[index] = index_tree[child_index];
index_tree[child_index] = t_index;
index = child_index;
} else {
break;
}
}
}
template <typename Dtype>
__device__ void adjust_small_heap_with_index_device_stride(Dtype* tree, Dtype *index_tree,int index,int length, int stride){
while (2 * index + 1 < length) {
int child_index = 2 * index + 1;
int off_0 = child_index * stride;
int off_1 = (child_index + 1) * stride;
if (child_index + 1 < length && tree[off_1] < tree[off_0]) {
child_index++;
}
int child_off = child_index * stride;
int cur_off = index * stride;
if (tree[cur_off] > tree[child_off]) {
Dtype t = tree[cur_off];
tree[cur_off] = tree[child_off];
tree[child_off] = t;
int t_index = index_tree[cur_off];
index_tree[cur_off] = index_tree[child_off];
index_tree[child_off] = t_index;
index = child_index;
} else {
break;
}
}
}
template <typename Dtype>
__global__ void topk_channel(const Dtype* in_data,
const int num,
const int channel,
const int inner_dim,
const int top_k,
bool out_max_val,
Dtype* out_data) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id > num * inner_dim) {
return;
}
int num_id = thread_id / inner_dim;
int inner_id = thread_id % inner_dim;
//
const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id;
extern __shared__ Dtype trees[];
Dtype* small_heap_tree = trees + threadIdx.x * top_k;
Dtype* tree_index = trees + threadIdx.x * top_k + blockDim.x * top_k;
for (int i = 0; i < top_k; i++) {
small_heap_tree[i] = -FLT_MAX;
tree_index[i] = -1;
}
for (int i = 0; i < channel; i++) {
Dtype data = tmp_in_data[i*inner_dim];
if (data > small_heap_tree[0]) {
small_heap_tree[0] = data;
tree_index[0] = i;
adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k);
}
}
Dtype* out = out_data + num_id * top_k * inner_dim + inner_id;
for (int i = top_k - 1; i >= 0; i--) {
out[i * inner_dim] = out_max_val ? small_heap_tree[0] : tree_index[0];
small_heap_tree[0] = FLT_MAX;
tree_index[0] = -1;
adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k);
}
}
/*trees size is k * blockDim.x*/
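/* Each thread first builds a private top-k min-heap over a strided slice of
   one image in shared memory; the per-thread heaps are then merged with a
   standard blockwise tree reduction (512 -> 256 -> ... -> 1), and thread 0
   pops the surviving heap k times so the results come out in descending
   order. */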
template <typename Dtype, int blockSize>
__global__ void topk_heap_shared(Dtype *out_data, int n, int inner_dim, const int top_k, const bool out_max_val, const Dtype *in_data){
extern __shared__ Dtype trees[];
const int block_id = blockIdx.x;
const int tid = threadIdx.x;
Dtype *cur_tree = trees + tid * top_k;
Dtype *cur_tree_index = cur_tree + top_k * blockDim.x;
for (int i = 0; i < top_k; i++){
cur_tree[i] = -FLT_MAX;
cur_tree_index[i] = -1;
}
/*build small heap for every thread in one picture*/
const Dtype* in = in_data + block_id * inner_dim;
for (int i = tid; i < inner_dim; i += blockDim.x){
if (in[i] > cur_tree[0]) {
cur_tree[0] = in[i];
cur_tree_index[0] = i;
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
Dtype* next_tree = cur_tree + 256 * top_k;
Dtype* next_tree_index = cur_tree_index + 256 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
Dtype* next_tree = cur_tree + 128 * top_k;
Dtype* next_tree_index = cur_tree_index + 128 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
Dtype* next_tree = cur_tree + 64 * top_k;
Dtype* next_tree_index = cur_tree_index + 64 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 64) {
if (tid < 32) {
Dtype* next_tree = cur_tree + 32 * top_k;
Dtype* next_tree_index = cur_tree_index + 32 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 32) {
if (tid < 16) {
Dtype* next_tree = cur_tree + 16 * top_k;
Dtype* next_tree_index = cur_tree_index + 16 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 16) {
if (tid < 8) {
Dtype* next_tree = cur_tree + 8 * top_k;
Dtype* next_tree_index = cur_tree_index + 8 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 8) {
if (tid < 4) {
Dtype* next_tree = cur_tree + 4 * top_k;
Dtype* next_tree_index = cur_tree_index + 4 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 4) {
if (tid < 2) {
Dtype* next_tree = cur_tree + 2 * top_k;
Dtype* next_tree_index = cur_tree_index + 2 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 2) {
if (tid < 1) {
Dtype* next_tree = cur_tree + 1 * top_k;
Dtype* next_tree_index = cur_tree_index + 1 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (tid == 0) {
int stride = out_max_val ? block_id * top_k * 2 : block_id * top_k;
Dtype* out = out_data + stride;
for (int i = top_k - 1; i >= 0; i--) {
if (!out_max_val) {
out[i] = cur_tree_index[0];
} else {
out[i + top_k] = cur_tree[0];
out[i] = cur_tree_index[0];
}
cur_tree[0] = FLT_MAX;
cur_tree_index[0] = -1;
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
template <>
SaberStatus SaberArgmax<NV, AK_FLOAT>::dispatch( \
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ArgmaxParam<NV>& param) {
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
const OpDataType * in_data = (const OpDataType*)inputs[0]->data();
OpDataType * out_data = (OpDataType*)outputs[0]->mutable_data();
int outer_dim = inputs[0]->count_valid(0, param.axis);
if (param.has_axis) {
int count = inputs[0]->count_valid(0, inputs[0]->dims());
int dim = inputs[0]->shape()[param.axis];
int inner_dim = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
int total_threads = count / dim;
if (param.top_k == 1) {
hipLaunchKernelGGL(( top1_channel<OpDataType>), dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, in_data, outer_dim, dim, inner_dim, param.out_max_val, out_data);
} else {
hipLaunchKernelGGL(( topk_channel<OpDataType>), dim3(CUDA_GET_BLOCKS(total_threads)), dim3(CUDA_NUM_THREADS), 2 * sizeof(OpDataType) * CUDA_NUM_THREADS * param.top_k, cuda_stream, in_data, outer_dim, dim, inner_dim, param.top_k, param.out_max_val, out_data);
}
} else {
int inner_dim = inputs[0]->count_valid(1, inputs[0]->dims());
int outer_dim = inputs[0]->num();
if (param.top_k == 1) {
if (inner_dim / CUDA_NUM_THREADS < 10) {
int block_size = pow(2, ceil(log(inner_dim) / log(2)));
block_size = block_size > CUDA_NUM_THREADS ? CUDA_NUM_THREADS : block_size;
hipLaunchKernelGGL(( top1<OpDataType, CUDA_NUM_THREADS>), dim3(outer_dim), dim3(CUDA_NUM_THREADS), 0, cuda_stream, in_data, outer_dim, inner_dim, param.out_max_val, out_data);
} else {
int block_num = CUDA_GET_BLOCKS(inner_dim);
dim3 grid(block_num, outer_dim);
hipLaunchKernelGGL(( block_top1<OpDataType, CUDA_NUM_THREADS>), dim3(grid), dim3(CUDA_NUM_THREADS), 0, cuda_stream, in_data, outer_dim, inner_dim, (OpDataType*)_block_max_value.mutable_data(), (OpDataType*)_block_max_index.mutable_data());
hipLaunchKernelGGL(( top1<OpDataType, CUDA_NUM_THREADS>), dim3(outer_dim), dim3(CUDA_NUM_THREADS), 0, cuda_stream, (OpDataType*)_block_max_value.data(), (OpDataType*)_block_max_index.data(), outer_dim, block_num, param.out_max_val, out_data);
}
} else {
hipLaunchKernelGGL(( topk_heap_shared<OpDataType, CUDA_NUM_THREADS>), dim3(outer_dim), dim3(CUDA_NUM_THREADS), sizeof(OpDataType) * CUDA_NUM_THREADS * param.top_k * 2, cuda_stream, out_data, outer_dim, inner_dim, param.top_k, param.out_max_val, in_data);
}
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberArgmax, ArgmaxParam, NV, AK_INT8);
DEFINE_OP_TEMPLATE(SaberArgmax, ArgmaxParam, NV, AK_HALF);
}
}
| b3ddb88eb9bd78e51928254658a7a3033ed27f68.cu | #include "saber/funcs/impl/cuda/saber_argmax.h"
#include "cuda_fp16.h"
#include <cfloat>
namespace anakin {
namespace saber {
template <typename Dtype, unsigned int blockSize>
__global__ void top1(const Dtype* in_data,
const int height,
const int width,
bool out_max_val,
Dtype* out_data) {
if (blockIdx.x > height) {
return;
}
__shared__ Dtype share_data[CUDA_NUM_THREADS];
__shared__ Dtype share_index[CUDA_NUM_THREADS];
int offset = blockIdx.x * width;
const Dtype* tmp_in_data = in_data + offset;
Dtype minest = -1e32;
int index = threadIdx.x;
if (index < width) {
Dtype result = tmp_in_data[index];
Dtype idx = index;
for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) {
if (result < tmp_in_data[tid]) {
result = tmp_in_data[tid];
idx = tid;
}
}
share_data[index] = result;
share_index[index] = idx;
} else {
share_data[index] = minest;
share_index[index] = -1;
}
__syncthreads();
if (blockSize >= 512) {
if (index < 256) {
int index2 = index + 256;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (index < 128) {
int index2 = index + 128;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (index < 64) {
int index2 = index + 64;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (index < 32) {
volatile Dtype *vmax = share_data;
volatile Dtype *vindex = share_index;
if (blockSize >= 64) {
int index2 = index + 32;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 32) {
int index2 = index + 16;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 16) {
int index2 = index + 8;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 8) {
int index2 = index + 4;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 4) {
int index2 = index + 2;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 2) {
int index2 = index + 1;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
}
__syncthreads();
if (index == 0) {
if (!out_max_val) {
out_data[blockIdx.x] = share_index[0];
} else {
out_data[2 * blockIdx.x] = share_index[0];
out_data[2 * blockIdx.x + 1] = share_data[0];
}
}
}
template <typename Dtype, unsigned int blockSize>
__global__ void block_top1(const Dtype* in_data,
const int height,
const int width,
Dtype* out_data,
Dtype* out_index) {
__shared__ Dtype share_data[CUDA_NUM_THREADS];
__shared__ Dtype share_index[CUDA_NUM_THREADS];
int offset = blockIdx.y * width + blockIdx.x * CUDA_NUM_THREADS;
const Dtype* tmp_in_data = in_data + offset;
Dtype minest = -1e32;
int index = threadIdx.x;
if (index + blockIdx.x * CUDA_NUM_THREADS < width) {
share_data[index] = tmp_in_data[index];
share_index[index] = threadIdx.x;
} else {
share_data[index] = minest;
share_index[index] = -1;
}
__syncthreads();
if (blockSize >= 512) {
if (index < 256) {
int index2 = index + 256;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (index < 128) {
int index2 = index + 128;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (index < 64) {
int index2 = index + 64;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (index < 32) {
volatile Dtype *vmax = share_data;
volatile Dtype *vindex = share_index;
if (blockSize >= 64) {
            int index2 = index + 32;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 32) {
int index2 = index + 16;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 16) {
int index2 = index + 8;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 8) {
int index2 = index + 4;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 4) {
int index2 = index + 2;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 2) {
int index2 = index + 1;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
}
__syncthreads();
if (index == 0) {
int offset = blockIdx.y * gridDim.x + blockIdx.x;
out_data[offset] = share_data[0];
out_index[offset] = share_index[0];
}
}
template <typename Dtype, unsigned int blockSize>
__global__ void top1(const Dtype* in_data,
const Dtype* in_index,
const int height,
const int width,
bool out_max_val,
Dtype* out_data) {
__shared__ Dtype share_data[blockSize];
__shared__ Dtype share_index[blockSize];
int offset = blockIdx.x * width;
const Dtype* tmp_in_data = in_data + offset;
const Dtype* tmp_in_index = in_index + offset;
Dtype minest = -1e10;
int index = threadIdx.x;
if (index < width) {
Dtype result = tmp_in_data[index];
Dtype idx = index;
for (int tid = index + blockDim.x; tid < width; tid += blockDim.x) {
if (result < tmp_in_data[tid]) {
result = tmp_in_data[tid];
idx = tid;
}
}
share_data[index] = result;
share_index[index] = idx;
} else {
share_data[index] = minest;
share_index[index] = -1;
}
__syncthreads();
if (blockSize >= 512) {
if (index < 256) {
int index2 = index + 256;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (index < 128) {
int index2 = index + 128;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (index < 64) {
int index2 = index + 64;
if (share_data[index2] > share_data[index]) {
share_data[index] = share_data[index2];
share_index[index] = share_index[index2];
}
}
__syncthreads();
}
if (index < 32) {
volatile Dtype *vmax = share_data;
volatile Dtype *vindex = share_index;
if (blockSize >= 64) {
            int index2 = index + 32;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 32) {
int index2 = index + 16;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 16) {
int index2 = index + 8;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 8) {
int index2 = index + 4;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 4) {
int index2 = index + 2;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
if (blockSize >= 2) {
int index2 = index + 1;
if (vmax[index2] > vmax[index]) {
vmax[index] = vmax[index2];
vindex[index] = vindex[index2];
}
}
}
__syncthreads();
if (index == 0) {
int block_id = share_index[0];
if (!out_max_val) {
out_data[blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id];
} else {
out_data[2 * blockIdx.x] = block_id * CUDA_NUM_THREADS + tmp_in_index[block_id];
out_data[2 * blockIdx.x + 1] = share_data[0];
}
}
}
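// Argmax along the channel axis: one thread handles one (batch, inner-dim)
// position, scans all channels sequentially, and writes either the index of
// the maximum or the maximum value itself depending on out_max_val.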
template <typename Dtype>
__global__ void top1_channel(const Dtype* in_data,
const int num,
const int channel,
const int inner_dim,
bool out_max_val,
Dtype* out_data) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id > num * inner_dim) {
return;
}
int num_id = thread_id / inner_dim;
int inner_id = thread_id % inner_dim;
//
const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id;
Dtype max_data = tmp_in_data[0];
Dtype max_id = 0;
for (int i = 1; i < channel; i++) {
Dtype data = tmp_in_data[i*inner_dim];
if (max_data < data) {
max_data = data;
max_id = i;
}
}
out_data[thread_id] = out_max_val ? max_data : max_id;
}
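// Sift-down for an array-backed min-heap, moving the value tree and the index
// tree in lockstep. The top-k kernels keep the k largest elements seen so far
// in such a heap: the root is the smallest of the current top-k, so a new
// candidate only needs to beat the root to be inserted.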
template <typename Dtype>
__device__ void adjust_small_heap_with_index_device(Dtype* tree, Dtype *index_tree,int index,int length){
while (2 * index + 1 < length) {
int child_index = 2 * index + 1;
if (child_index + 1 < length && tree[child_index + 1] < tree[child_index]) {
child_index++;
}
if (tree[index] > tree[child_index]) {
Dtype t = tree[index];
tree[index] = tree[child_index];
tree[child_index] = t;
int t_index = index_tree[index];
index_tree[index] = index_tree[child_index];
index_tree[child_index] = t_index;
index = child_index;
} else {
break;
}
}
}
template <typename Dtype>
__device__ void adjust_small_heap_with_index_device_stride(Dtype* tree, Dtype *index_tree,int index,int length, int stride){
while (2 * index + 1 < length) {
int child_index = 2 * index + 1;
int off_0 = child_index * stride;
int off_1 = (child_index + 1) * stride;
if (child_index + 1 < length && tree[off_1] < tree[off_0]) {
child_index++;
}
int child_off = child_index * stride;
int cur_off = index * stride;
if (tree[cur_off] > tree[child_off]) {
Dtype t = tree[cur_off];
tree[cur_off] = tree[child_off];
tree[child_off] = t;
int t_index = index_tree[cur_off];
index_tree[cur_off] = index_tree[child_off];
index_tree[child_off] = t_index;
index = child_index;
} else {
break;
}
}
}
template <typename Dtype>
__global__ void topk_channel(const Dtype* in_data,
const int num,
const int channel,
const int inner_dim,
const int top_k,
bool out_max_val,
Dtype* out_data) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_id > num * inner_dim) {
return;
}
int num_id = thread_id / inner_dim;
int inner_id = thread_id % inner_dim;
//
const Dtype* tmp_in_data = in_data + num_id * channel * inner_dim + inner_id;
extern __shared__ Dtype trees[];
Dtype* small_heap_tree = trees + threadIdx.x * top_k;
Dtype* tree_index = trees + threadIdx.x * top_k + blockDim.x * top_k;
for (int i = 0; i < top_k; i++) {
small_heap_tree[i] = -FLT_MAX;
tree_index[i] = -1;
}
for (int i = 0; i < channel; i++) {
Dtype data = tmp_in_data[i*inner_dim];
if (data > small_heap_tree[0]) {
small_heap_tree[0] = data;
tree_index[0] = i;
adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k);
}
}
Dtype* out = out_data + num_id * top_k * inner_dim + inner_id;
for (int i = top_k - 1; i >= 0; i--) {
out[i * inner_dim] = out_max_val ? small_heap_tree[0] : tree_index[0];
small_heap_tree[0] = FLT_MAX;
tree_index[0] = -1;
adjust_small_heap_with_index_device(small_heap_tree, tree_index, 0, top_k);
}
}
/*trees size is k * blockDim.x*/
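/* Each thread first builds a private top-k min-heap over a strided slice of
   one image in shared memory; the per-thread heaps are then merged with a
   standard blockwise tree reduction (512 -> 256 -> ... -> 1), and thread 0
   pops the surviving heap k times so the results come out in descending
   order. */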
template <typename Dtype, int blockSize>
__global__ void topk_heap_shared(Dtype *out_data, int n, int inner_dim, const int top_k, const bool out_max_val, const Dtype *in_data){
extern __shared__ Dtype trees[];
const int block_id = blockIdx.x;
const int tid = threadIdx.x;
Dtype *cur_tree = trees + tid * top_k;
Dtype *cur_tree_index = cur_tree + top_k * blockDim.x;
for (int i = 0; i < top_k; i++){
cur_tree[i] = -FLT_MAX;
cur_tree_index[i] = -1;
}
/*build small heap for every thread in one picture*/
const Dtype* in = in_data + block_id * inner_dim;
for (int i = tid; i < inner_dim; i += blockDim.x){
if (in[i] > cur_tree[0]) {
cur_tree[0] = in[i];
cur_tree_index[0] = i;
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
__syncthreads();
if (blockSize >= 512) {
if (tid < 256) {
Dtype* next_tree = cur_tree + 256 * top_k;
Dtype* next_tree_index = cur_tree_index + 256 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
Dtype* next_tree = cur_tree + 128 * top_k;
Dtype* next_tree_index = cur_tree_index + 128 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
Dtype* next_tree = cur_tree + 64 * top_k;
Dtype* next_tree_index = cur_tree_index + 64 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 64) {
if (tid < 32) {
Dtype* next_tree = cur_tree + 32 * top_k;
Dtype* next_tree_index = cur_tree_index + 32 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 32) {
if (tid < 16) {
Dtype* next_tree = cur_tree + 16 * top_k;
Dtype* next_tree_index = cur_tree_index + 16 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 16) {
if (tid < 8) {
Dtype* next_tree = cur_tree + 8 * top_k;
Dtype* next_tree_index = cur_tree_index + 8 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 8) {
if (tid < 4) {
Dtype* next_tree = cur_tree + 4 * top_k;
Dtype* next_tree_index = cur_tree_index + 4 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 4) {
if (tid < 2) {
Dtype* next_tree = cur_tree + 2 * top_k;
Dtype* next_tree_index = cur_tree_index + 2 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (blockSize >= 2) {
if (tid < 1) {
Dtype* next_tree = cur_tree + 1 * top_k;
Dtype* next_tree_index = cur_tree_index + 1 * top_k;
for (int i = 0; i < top_k; i++) {
if (next_tree[i] > cur_tree[0]) {
cur_tree[0] = next_tree[i];
cur_tree_index[0] = next_tree_index[i];
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
__syncthreads();
}
if (tid == 0) {
int stride = out_max_val ? block_id * top_k * 2 : block_id * top_k;
Dtype* out = out_data + stride;
for (int i = top_k - 1; i >= 0; i--) {
if (!out_max_val) {
out[i] = cur_tree_index[0];
} else {
out[i + top_k] = cur_tree[0];
out[i] = cur_tree_index[0];
}
cur_tree[0] = FLT_MAX;
cur_tree_index[0] = -1;
adjust_small_heap_with_index_device(cur_tree, cur_tree_index, 0, top_k);
}
}
}
template <>
SaberStatus SaberArgmax<NV, AK_FLOAT>::dispatch( \
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
ArgmaxParam<NV>& param) {
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
const OpDataType * in_data = (const OpDataType*)inputs[0]->data();
OpDataType * out_data = (OpDataType*)outputs[0]->mutable_data();
int outer_dim = inputs[0]->count_valid(0, param.axis);
if (param.has_axis) {
int count = inputs[0]->count_valid(0, inputs[0]->dims());
int dim = inputs[0]->shape()[param.axis];
int inner_dim = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
int total_threads = count / dim;
if (param.top_k == 1) {
top1_channel<OpDataType><<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, cuda_stream>>>(in_data, outer_dim, dim, inner_dim, param.out_max_val, out_data);
} else {
topk_channel<OpDataType><<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 2 * sizeof(OpDataType) * CUDA_NUM_THREADS * param.top_k, cuda_stream>>>(in_data, outer_dim, dim, inner_dim, param.top_k, param.out_max_val, out_data);
}
} else {
int inner_dim = inputs[0]->count_valid(1, inputs[0]->dims());
int outer_dim = inputs[0]->num();
if (param.top_k == 1) {
if (inner_dim / CUDA_NUM_THREADS < 10) {
int block_size = pow(2, ceil(log(inner_dim) / log(2)));
block_size = block_size > CUDA_NUM_THREADS ? CUDA_NUM_THREADS : block_size;
top1<OpDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, 0, cuda_stream>>>(in_data, outer_dim, inner_dim, param.out_max_val, out_data);
} else {
int block_num = CUDA_GET_BLOCKS(inner_dim);
dim3 grid(block_num, outer_dim);
block_top1<OpDataType, CUDA_NUM_THREADS><<<grid, CUDA_NUM_THREADS, 0, cuda_stream>>>(in_data, outer_dim, inner_dim, (OpDataType*)_block_max_value.mutable_data(), (OpDataType*)_block_max_index.mutable_data());
top1<OpDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, 0, cuda_stream>>>((OpDataType*)_block_max_value.data(), (OpDataType*)_block_max_index.data(), outer_dim, block_num, param.out_max_val, out_data);
}
} else {
topk_heap_shared<OpDataType, CUDA_NUM_THREADS><<<outer_dim, CUDA_NUM_THREADS, sizeof(OpDataType) * CUDA_NUM_THREADS * param.top_k * 2, cuda_stream>>>(out_data, outer_dim, inner_dim, param.top_k, param.out_max_val, in_data);
}
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberArgmax, ArgmaxParam, NV, AK_INT8);
DEFINE_OP_TEMPLATE(SaberArgmax, ArgmaxParam, NV, AK_HALF);
}
}
|
ccc35d7767b2c7489c4ffb74f8f5aaf0a3674cb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file YOLOPassThruLayer_device.cu
* @date 2018-01-03
* @author moonhoen lee
* @brief
* @details
*/
#include "YOLOPassThruLayer.h"
#include "PropMgmt.h"
#include "MemoryMgmt.h"
using namespace std;
#define EPSILON 0.000001
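// Reorg (pass-through) kernel shared by the forward and backward passes: each
// thread owns one (batch, channel) plane described by the channels/rows/cols
// arguments and maps every element between that layout and a second layout
// with stride*stride fewer channels but stride-times larger spatial
// dimensions. The 'forward' flag picks the copy direction, so backpropagation
// reuses the kernel with the flag inverted.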
template <typename Dtype>
__global__ void reorgKernel(const Dtype* input, int size, int channels, int rows, int cols,
int stride, bool forward, Dtype* output) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
int curBatch = idx / channels;
int curChannel = idx % channels;
int topChannels = channels / (stride * stride);
int topRows = rows * stride;
int topCols = cols * stride;
for (int h = 0; h < cols; h++) {
for (int w = 0; w < rows; w++) {
int bottomIndex = w + rows * (h + cols * (curChannel + channels * curBatch));
int c2 = curChannel % topChannels;
int offset = curChannel / topChannels;
int w2 = w * stride + offset % stride;
int h2 = h * stride + offset / stride;
int topIndex = w2 + topRows * (h2 + topCols * (c2 + topChannels * curBatch));
if (forward)
output[topIndex] = input[bottomIndex];
else
output[bottomIndex] = input[topIndex];
}
}
}
template <typename Dtype>
void YOLOPassThruLayer<Dtype>::reshape() {
Layer<Dtype>::_adjustInputShape();
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
uint32_t batches = inputShape[0];
uint32_t channels = inputShape[1];
uint32_t rows = inputShape[2];
uint32_t cols = inputShape[3];
this->_inputShape[0] = {batches, channels, rows, cols};
int strideInt = SLPROP(YOLOPassThru, stride);
    bool reverseBool = SLPROP(YOLOPassThru, reverse);
if (reverseBool) {
SASSERT0(rows % strideInt == 0);
SASSERT0(cols % strideInt == 0);
this->_outputData[0]->reshape({batches, uint32_t(channels * strideInt * strideInt),
uint32_t(rows / strideInt), uint32_t(cols / strideInt)});
} else {
SASSERT0(channels % (strideInt * strideInt) == 0);
this->_outputData[0]->reshape({batches, uint32_t(channels / strideInt / strideInt),
uint32_t(rows * strideInt), uint32_t(cols * strideInt)});
}
}
template <typename Dtype>
void YOLOPassThruLayer<Dtype>::feedforward() {
reshape();
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
int channelCount = inputShape[1];
int rowCount = inputShape[2];
int colCount = inputShape[3];
int size = batchCount * channelCount;
const Dtype *inputData = this->_inputData[0]->device_data();
Dtype *outputData = this->_outputData[0]->mutable_device_data();
int strideInt = SLPROP(YOLOPassThru, stride);
bool reverseBool = SLPROP(YOLOPassThru, reverse);
hipLaunchKernelGGL(( reorgKernel<Dtype>), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
inputData, size, channelCount, rowCount, colCount, strideInt, reverseBool, outputData);
}
template <typename Dtype>
void YOLOPassThruLayer<Dtype>::backpropagation() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
int channelCount = inputShape[1];
int rowCount = inputShape[2];
int colCount = inputShape[3];
int size = batchCount * channelCount;
const Dtype *outputGrad = this->_outputData[0]->device_grad();
Dtype *inputGrad = this->_inputData[0]->mutable_device_grad();
int strideInt = SLPROP(YOLOPassThru, stride);
bool reverseBool = SLPROP(YOLOPassThru, reverse);
int outChannels = channelCount * strideInt * strideInt;
int outRows = rowCount / strideInt;
int outCols = colCount / strideInt;
hipLaunchKernelGGL(( reorgKernel<Dtype>), dim3(SOOOA_GET_BLOCKS(size)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
outputGrad, size, outChannels, outRows, outCols, strideInt, !reverseBool, inputGrad);
}
/****************************************************************************
* layer callback functions
****************************************************************************/
template<typename Dtype>
void* YOLOPassThruLayer<Dtype>::initLayer() {
YOLOPassThruLayer* layer = NULL;
SNEW(layer, YOLOPassThruLayer<Dtype>);
SASSUME0(layer != NULL);
return (void*)layer;
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::destroyLayer(void* instancePtr) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
SDELETE(layer);
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index) {
SASSERT0(index == 0);
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
if (isInput) {
SASSERT0(layer->_inputData.size() == 0);
layer->_inputData.push_back((Data<Dtype>*)tensorPtr);
} else {
SASSERT0(layer->_outputData.size() == 0);
layer->_outputData.push_back((Data<Dtype>*)tensorPtr);
}
}
template<typename Dtype>
bool YOLOPassThruLayer<Dtype>::allocLayerTensors(void* instancePtr) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
layer->reshape();
return true;
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
layer->feedforward();
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::backwardTensor(void* instancePtr) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
layer->backpropagation();
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::learnTensor(void* instancePtr) {
SASSERT0(false);
}
template<typename Dtype>
bool YOLOPassThruLayer<Dtype>::checkShape(vector<TensorShape> inputShape,
vector<TensorShape> &outputShape) {
    /* When reverse is false, rows and cols must be multiples of stride.
       When reverse is true, channels must be a multiple of stride squared.
    */
    // exactly one input tensor
if (SLPROP_BASE(input).size() != 1) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer should have only 1 input tensors but it has %d tensors",
(int)SLPROP_BASE(input).size());
return false;
}
    // exactly one output tensor
if (SLPROP_BASE(output).size() != 1) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer should have only 1 output tensors but it has %d tensors",
(int)SLPROP_BASE(output).size());
return false;
}
if (inputShape[0].N <= 0 || inputShape[0].C <= 0 ||
inputShape[0].H <= 0 || inputShape[0].W <= 0)
return false;
const int strideInt = SLPROP(YOLOPassThru, stride);
const bool reverseBool = SLPROP(YOLOPassThru, reverse);
int outChannel;
int outHeight;
int outWidth;
if (reverseBool) {
if (inputShape[0].H % strideInt != 0 || inputShape[0].W % strideInt != 0) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer input tensor's rows and cols should be multiple of "
"stride if reverse case. but rows=%d, cols=%d and stride=%d.",
(int)inputShape[0].W, (int)inputShape[0].H, strideInt);
return false;
} else {
outChannel = inputShape[0].C * (strideInt * strideInt);
outHeight = inputShape[0].H / strideInt;
outWidth = inputShape[0].W / strideInt;
}
} else {
if (inputShape[0].C % (strideInt * strideInt) != 0) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer input tensor's channels should be multiple of "
"(stride ** 2) if not reverse case. but channels=%d and stride=%d.",
(int)inputShape[0].C, strideInt);
return false;
} else {
outChannel = inputShape[0].C / (strideInt * strideInt);
outHeight = inputShape[0].H * strideInt;
outWidth = inputShape[0].W * strideInt;
}
}
if (outChannel <= 0 || outHeight <= 0 || outWidth <= 0)
return false;
TensorShape outputShape1;
outputShape1.N = inputShape[0].N;
outputShape1.C = outChannel;
outputShape1.H = outHeight;
outputShape1.W = outWidth;
outputShape.push_back(outputShape1);
return true;
}
template<typename Dtype>
uint64_t YOLOPassThruLayer<Dtype>::calcGPUSize(vector<TensorShape> inputShape) {
return 0UL;
}
template class YOLOPassThruLayer<float>;
| ccc35d7767b2c7489c4ffb74f8f5aaf0a3674cb1.cu | /**
* @file YOLOPassThruLayer_device.cu
* @date 2018-01-03
* @author moonhoen lee
* @brief
* @details
*/
#include "YOLOPassThruLayer.h"
#include "PropMgmt.h"
#include "MemoryMgmt.h"
using namespace std;
#define EPSILON 0.000001
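// Reorg (pass-through) kernel shared by the forward and backward passes: each
// thread owns one (batch, channel) plane described by the channels/rows/cols
// arguments and maps every element between that layout and a second layout
// with stride*stride fewer channels but stride-times larger spatial
// dimensions. The 'forward' flag picks the copy direction, so backpropagation
// reuses the kernel with the flag inverted.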
template <typename Dtype>
__global__ void reorgKernel(const Dtype* input, int size, int channels, int rows, int cols,
int stride, bool forward, Dtype* output) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
int curBatch = idx / channels;
int curChannel = idx % channels;
int topChannels = channels / (stride * stride);
int topRows = rows * stride;
int topCols = cols * stride;
for (int h = 0; h < cols; h++) {
for (int w = 0; w < rows; w++) {
int bottomIndex = w + rows * (h + cols * (curChannel + channels * curBatch));
int c2 = curChannel % topChannels;
int offset = curChannel / topChannels;
int w2 = w * stride + offset % stride;
int h2 = h * stride + offset / stride;
int topIndex = w2 + topRows * (h2 + topCols * (c2 + topChannels * curBatch));
if (forward)
output[topIndex] = input[bottomIndex];
else
output[bottomIndex] = input[topIndex];
}
}
}
template <typename Dtype>
void YOLOPassThruLayer<Dtype>::reshape() {
Layer<Dtype>::_adjustInputShape();
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
uint32_t batches = inputShape[0];
uint32_t channels = inputShape[1];
uint32_t rows = inputShape[2];
uint32_t cols = inputShape[3];
this->_inputShape[0] = {batches, channels, rows, cols};
int strideInt = SLPROP(YOLOPassThru, stride);
    bool reverseBool = SLPROP(YOLOPassThru, reverse);
if (reverseBool) {
SASSERT0(rows % strideInt == 0);
SASSERT0(cols % strideInt == 0);
this->_outputData[0]->reshape({batches, uint32_t(channels * strideInt * strideInt),
uint32_t(rows / strideInt), uint32_t(cols / strideInt)});
} else {
SASSERT0(channels % (strideInt * strideInt) == 0);
this->_outputData[0]->reshape({batches, uint32_t(channels / strideInt / strideInt),
uint32_t(rows * strideInt), uint32_t(cols * strideInt)});
}
}
template <typename Dtype>
void YOLOPassThruLayer<Dtype>::feedforward() {
reshape();
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
int channelCount = inputShape[1];
int rowCount = inputShape[2];
int colCount = inputShape[3];
int size = batchCount * channelCount;
const Dtype *inputData = this->_inputData[0]->device_data();
Dtype *outputData = this->_outputData[0]->mutable_device_data();
int strideInt = SLPROP(YOLOPassThru, stride);
bool reverseBool = SLPROP(YOLOPassThru, reverse);
reorgKernel<Dtype><<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>(
inputData, size, channelCount, rowCount, colCount, strideInt, reverseBool, outputData);
}
template <typename Dtype>
void YOLOPassThruLayer<Dtype>::backpropagation() {
const vector<uint32_t>& inputShape = this->_inputData[0]->getShape();
int batchCount = inputShape[0];
int channelCount = inputShape[1];
int rowCount = inputShape[2];
int colCount = inputShape[3];
int size = batchCount * channelCount;
const Dtype *outputGrad = this->_outputData[0]->device_grad();
Dtype *inputGrad = this->_inputData[0]->mutable_device_grad();
int strideInt = SLPROP(YOLOPassThru, stride);
bool reverseBool = SLPROP(YOLOPassThru, reverse);
int outChannels = channelCount * strideInt * strideInt;
int outRows = rowCount / strideInt;
int outCols = colCount / strideInt;
reorgKernel<Dtype><<<SOOOA_GET_BLOCKS(size), SOOOA_CUDA_NUM_THREADS>>>(
outputGrad, size, outChannels, outRows, outCols, strideInt, !reverseBool, inputGrad);
}
/****************************************************************************
* layer callback functions
****************************************************************************/
template<typename Dtype>
void* YOLOPassThruLayer<Dtype>::initLayer() {
YOLOPassThruLayer* layer = NULL;
SNEW(layer, YOLOPassThruLayer<Dtype>);
SASSUME0(layer != NULL);
return (void*)layer;
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::destroyLayer(void* instancePtr) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
SDELETE(layer);
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index) {
SASSERT0(index == 0);
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
if (isInput) {
SASSERT0(layer->_inputData.size() == 0);
layer->_inputData.push_back((Data<Dtype>*)tensorPtr);
} else {
SASSERT0(layer->_outputData.size() == 0);
layer->_outputData.push_back((Data<Dtype>*)tensorPtr);
}
}
template<typename Dtype>
bool YOLOPassThruLayer<Dtype>::allocLayerTensors(void* instancePtr) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
layer->reshape();
return true;
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::forwardTensor(void* instancePtr, int miniBatchIdx) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
layer->feedforward();
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::backwardTensor(void* instancePtr) {
YOLOPassThruLayer<Dtype>* layer = (YOLOPassThruLayer<Dtype>*)instancePtr;
layer->backpropagation();
}
template<typename Dtype>
void YOLOPassThruLayer<Dtype>::learnTensor(void* instancePtr) {
SASSERT0(false);
}
template<typename Dtype>
bool YOLOPassThruLayer<Dtype>::checkShape(vector<TensorShape> inputShape,
vector<TensorShape> &outputShape) {
    /* When reverse is false, rows and cols must be multiples of stride.
       When reverse is true, channels must be a multiple of stride squared.
    */
    // exactly one input tensor
if (SLPROP_BASE(input).size() != 1) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer should have only 1 input tensors but it has %d tensors",
(int)SLPROP_BASE(input).size());
return false;
}
    // exactly one output tensor
if (SLPROP_BASE(output).size() != 1) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer should have only 1 output tensors but it has %d tensors",
(int)SLPROP_BASE(output).size());
return false;
}
if (inputShape[0].N <= 0 || inputShape[0].C <= 0 ||
inputShape[0].H <= 0 || inputShape[0].W <= 0)
return false;
const int strideInt = SLPROP(YOLOPassThru, stride);
const bool reverseBool = SLPROP(YOLOPassThru, reverse);
int outChannel;
int outHeight;
int outWidth;
if (reverseBool) {
if (inputShape[0].H % strideInt != 0 || inputShape[0].W % strideInt != 0) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer input tensor's rows and cols should be multiple of "
"stride if reverse case. but rows=%d, cols=%d and stride=%d.",
(int)inputShape[0].W, (int)inputShape[0].H, strideInt);
return false;
} else {
outChannel = inputShape[0].C * (strideInt * strideInt);
outHeight = inputShape[0].H / strideInt;
outWidth = inputShape[0].W / strideInt;
}
} else {
if (inputShape[0].C % (strideInt * strideInt) != 0) {
SEVENT_PUSH(NETWORK_EVENT_TYPE_eVALIDATION,
"YOLO Passthrough Layer input tensor's channels should be multiple of "
"(stride ** 2) if not reverse case. but channels=%d and stride=%d.",
(int)inputShape[0].C, strideInt);
return false;
} else {
outChannel = inputShape[0].C / (strideInt * strideInt);
outHeight = inputShape[0].H * strideInt;
outWidth = inputShape[0].W * strideInt;
}
}
if (outChannel <= 0 || outHeight <= 0 || outWidth <= 0)
return false;
TensorShape outputShape1;
outputShape1.N = inputShape[0].N;
outputShape1.C = outChannel;
outputShape1.H = outHeight;
outputShape1.W = outWidth;
outputShape.push_back(outputShape1);
return true;
}
template<typename Dtype>
uint64_t YOLOPassThruLayer<Dtype>::calcGPUSize(vector<TensorShape> inputShape) {
return 0UL;
}
template class YOLOPassThruLayer<float>;
|
5d61fb49cadbfe9f9697610f0dab17373f10a817.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu/mblas/matrix_functions.h"
#include "gpu/mblas/handles.h"
using namespace std;
namespace amunmt {
namespace GPU {
namespace mblas {
thread_local CudaStreamHandler CudaStreamHandler::instance_;
thread_local CublasHandler CublasHandler::instance_;
Matrix& Swap(Matrix& Out, Matrix& In) {
Out.swap(In);
return Out;
}
__global__ void gMean(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
const VectorWrapper<uint> sentenceLengths)
{
// out = batches * states
// in = max sentence length * states * 1 * batches
// mapping = max length * batches
int id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("id = %d in = %lu %lu %lu %lu = %lu %lu \n", id, in.dim(0), in.dim(1), in.dim(2), in.dim(3), in.size(), sizeof(in));
if (id < out.size()) {
uint indices[SHAPE_SIZE];
out.id2Indices(id, indices);
//printf("%d -> %lu %lu %lu %lu \n", id, indices[0], indices[1], indices[2], indices[3]);
size_t batch = indices[0];
size_t state = indices[1];
float sum = 0.0f;
int counter = 0;
for (size_t row = 0; row < in.dim(0); ++row) {
bool isWord = row < sentenceLengths[batch];
//printf("batch=%lu startMapInd=%lu mapOffset=%lu -> %d \n", batch, startMapInd, mapOffset, isWord);
if (isWord) {
sum += in(row, state, 0, batch);
++counter;
}
}
sum /= (float) counter;
out[id] = sum;
}
}
void Mean(Matrix& Out,
const Matrix& In,
const mblas::Vector<uint> &sentenceLengths)
{
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
assert(Out.dim(0) == In.dim(3));
assert(Out.dim(1) == In.dim(1));
// mean of each ROW
size_t batchNum = Out.dim(0) * Out.dim(2) * Out.dim(3);
size_t stateLength = Out.dim(1);
size_t sentenceLength = (In.dim(0) * In.dim(2) * In.dim(3)) / batchNum;
MatrixWrapper<float> outWrap(Out);
MatrixWrapper<float> inWrap(In);
//cerr << "outWrap=" << outWrap.Debug() << endl;
VectorWrapper<uint> sentenceLengthsWrap(sentenceLengths);
uint size = outWrap.size();
uint threads = ::min((uint)MAX_THREADS, size);
uint blocks = (size / threads) + ((size % threads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gMean), dim3(blocks), dim3(threads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, sentenceLengthsWrap);
}
__global__ void gWeightedMean(MatrixWrapper<float> out,
const MatrixWrapper<float> weights,
const MatrixWrapper<float> in,
const VectorWrapper<uint> mapping
)
{
int numHypos = weights.dim(0);
int states = in.dim(1);
int srcLen = weights.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < numHypos * states) {
int hypoInd = id / states;
int batchInd = mapping[hypoInd];
int stateInd = id % states;
//printf("hypoInd=%d batchInd=%d stateInd=%d \n", hypoInd, batchInd, stateInd);
float sum = 0.0f;
for (uint i = 0; i < srcLen; ++i) {
sum += weights(hypoInd, i, 0, 0) * in(i, stateInd, 0, batchInd);
}
out[id] = sum;
}
}
void WeightedMean(Matrix& Out,const Matrix& Weights, const Matrix& In, const mblas::Vector<uint>& mapping)
{
int numHypos = Weights.dim(0);
int states = In.dim(1);
Out.NewSize(numHypos, states);
MatrixWrapper<float> outWrap(Out);
MatrixWrapper<float> weightsWrap(Weights);
MatrixWrapper<float> inWrap(In);
VectorWrapper<uint> mappingWrap(mapping);
uint size = Out.size();
uint nThreads = ::min((uint) MAX_THREADS, (uint)size);
uint nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gWeightedMean), dim3(nBlocks), dim3(nThreads), 0, CudaStreamHandler::GetStream(),
outWrap, weightsWrap, inWrap, mappingWrap);
/*
cerr << "nBlocks=" << nBlocks << endl;
cerr << "Out=" << outWrap.Debug() << endl;
cerr << "Weights=" << weightsWrap.Debug() << endl;
cerr << "In=" << inWrap.Debug() << endl;
cerr << "mapping=" << mapping.size() << endl;
for (size_t i = 0; i < mapping.size(); ++i) {
cerr << mapping[i] << " ";
}
cerr << endl << endl;
*/
}
Matrix& Transpose(Matrix& Out, const Matrix& In) {
size_t m = In.dim(0);
size_t n = In.dim(1);
Out.NewSize(n, m);
float alpha = 1.0;
float beta = 0.0;
hipblasSgeam(CublasHandler::GetHandle(), HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, &alpha, In.data(), n,
&beta, In.data(), n, Out.data(), m);
return Out;
}
Matrix& Transpose(Matrix& Out) {
thread_local Matrix Temp;
Transpose(Temp, Out);
Swap(Out, Temp);
return Out;
}
Matrix& Concat(Matrix& Out, const Matrix& In) {
size_t oldSize = Out.size();
Out.Resize(Out.dim(0) + In.dim(0), Out.dim(1));
mblas::copy(In.data(), In.size(), Out.data() + oldSize, hipMemcpyDeviceToDevice);
return Out;
}
Matrix& Copy(Matrix& Out, const Matrix& In) {
Out.NewSize(In.dim(0), In.dim(1), In.dim(2), In.dim(3));
mblas::copy(In.data(), In.size(), Out.data(), hipMemcpyDeviceToDevice);
return Out;
}
__global__ void gPasteRows(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
int rowNo, int colNo)
{
int inRows = in.dim(0);
int inCols = in.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < inRows * inCols) {
int outCols = out.dim(1);
int inRow = id / inCols;
int inCol = id % inCols;
//out[outID] = in[id];
out(rowNo, inCol + colNo, 0, inRow) = in(inRow, inCol, 0, 0);
}
}
void PasteRows(Matrix& Out, const Matrix& In, const size_t rowNo, size_t colNo)
{
MatrixWrapper<float> outWrap(Out);
MatrixWrapper<float> inWrap(In);
uint size = In.size();
uint nThreads = ::min((uint) MAX_THREADS, (uint)size);
uint nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gPasteRows), dim3(nBlocks), dim3(nThreads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, rowNo, colNo);
}
Matrix& PasteRow(Matrix& Out,
const Matrix& In,
const size_t r, const size_t c)
{
size_t start = r * Out.dim(1) + c;
mblas::copy(In.data(), In.size(), Out.data() + start, hipMemcpyDeviceToDevice);
return Out;
}
Matrix& CopyRow(Matrix& Out,
const Matrix& In,
const size_t r, const size_t c) {
size_t length = In.dim(1) - c;
Out.NewSize(1, length);
size_t start = r * In.dim(1) + c;
//size_t end = start + length;
//mblas::copy(In.begin() + start, In.begin() + end, Out.begin());
mblas::copy(In.data() + start, length , Out.data(), hipMemcpyDeviceToDevice);
return Out;
}
__global__ void gCopyRows(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
const VectorWrapper<uint> indicesWrap)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < out.size()) {
uint dim[SHAPE_SIZE];
out.id2Indices(id, dim);
size_t indicesInd = dim[0];
size_t inRow =indicesWrap[indicesInd];
//printf("indicesInd:%d\n",indicesInd);
//printf("inRow:%d\n",inRow);
out(indicesInd, dim[1], 0, 0) = in(inRow, dim[1], 0, 0);
}
}
Matrix& CopyRows(Matrix& Out,
const Matrix& In,
const mblas::Vector<uint>& indices)
{
assert(In.dim(1) == Out.dim(1));
assert(Out.dim(0) == indices.size());
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
/*
cerr << "Out=" << Out.Debug(0) << endl;
cerr << "In=" << In.Debug(0) << endl;
cerr << "indices=" << Debug(indices, 2) << endl;
cerr << endl;
*/
size_t size = Out.size();
size_t numPairs = indices.size();
MatrixWrapper<float> outWrap(Out);
const MatrixWrapper<float> inWrap(In);
const VectorWrapper<uint> indicesWrap(indices);
//cerr << "size=" << size << endl;
uint threads = ::min((uint) MAX_THREADS, (uint)size);
uint blocks = size / threads + ((size % threads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, indicesWrap);
return Out;
}
Matrix& Assemble(Matrix& Out,
const Matrix& In,
const mblas::Vector<uint>& indices) {
Out.NewSize(indices.size(), In.dim(1));
//cerr << "Assemble=" << Out.Debug() << " " << In.Debug() << indices.size() << endl;
CopyRows(Out, In, indices);
return Out;
}
__global__ void gSlice(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
size_t n, size_t dim)
{
size_t row = blockIdx.x;
size_t inCol = threadIdx.x + dim * n;
size_t outCol = threadIdx.x;
while (outCol < out.dim(1)) {
out(row, outCol, 0, 0) = in(row, inCol, 0, 0);
inCol += blockDim.x;
outCol += blockDim.x;
}
}
Matrix& Slice(Matrix& Out,
const Matrix& In,
size_t n, size_t dim)
{
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
Out.NewSize(In.dim(0), dim);
MatrixWrapper<float> outWrap(Out);
const MatrixWrapper<float> inWrap(In);
/*
cerr << "outWrap=" << outWrap.Debug() << endl;
cerr << "inWrap=" << inWrap.Debug() << endl;
cerr << "n=" << n << endl;
cerr << "dim=" << dim << endl;
cerr << endl;
*/
uint threads = ::min((uint)MAX_THREADS, (uint)dim);
uint blocks = In.dim(0);
hipLaunchKernelGGL(( gSlice), dim3(blocks), dim3(threads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, n, dim);
return Out;
}
Matrix& Prod(hipblasHandle_t handle, Matrix& C, const Matrix& A, const Matrix& B,
bool transA, bool transB)
{
  assert((A.dim(2) == 1 && A.dim(3) == 1) || (B.dim(2) == 1 && B.dim(3) == 1));
Matrix::value_type alpha = 1.0;
Matrix::value_type beta = 0.0;
size_t m = A.dim(0) * A.dim(2) * A.dim(3);
size_t k = A.dim(1);
size_t mOut = A.dim(0);
size_t kOut = A.dim(1);
if(transA) {
std::swap(m, k);
std::swap(mOut, kOut);
}
size_t l = B.dim(0) * B.dim(2) * B.dim(3);
size_t n = B.dim(1);
size_t lOut = B.dim(0);
size_t nOut = B.dim(1);
if(transB) {
std::swap(l, n);
std::swap(lOut, nOut);
}
assert(k == l);
size_t lda = A.dim(1);
size_t ldb = B.dim(1);
size_t ldc = transB ? B.dim(0) * B.dim(2) * B.dim(3) : B.dim(1);
size_t dim2 = A.dim(2);
if (!transA && transB) {
// for GetAlignedSourceContext()
    assert(A.dim(2) == 1 && A.dim(3) == 1);
C.NewSize(nOut, B.dim(2), 1, 1);
}
else {
C.NewSize(mOut, nOut, A.dim(2) * B.dim(2), A.dim(3) * B.dim(3));
}
/*
cerr << "C=" << C.Debug(0) << endl;
cerr << "A=" << A.Debug(0) << endl;
cerr << "B=" << B.Debug(0) << endl;
cerr << "transA=" << transA << endl;
cerr << "transB=" << transB << endl;
cerr << endl;
*/
hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
/*
hipblasStatus_t hipblasSgemm(hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const float *alpha,
const float *A, int lda,
const float *B, int ldb,
const float *beta,
float *C, int ldc)
*/
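  // A, B and C are stored row-major while cuBLAS/hipBLAS assume column-major storage,
  // so the call below computes C^T = op(B)^T * op(A)^T by passing B before A; the
  // column-major result C^T is exactly C in row-major layout.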
hipblasSgemm(handle, opB, opA,
n, m, k,
&alpha,
B.data(), ldb,
A.data(), lda,
&beta,
C.data(), ldc);
return C;
}
Matrix& Prod(Matrix& C, const Matrix& A, const Matrix& B,
bool transA, bool transB) {
//std::cerr << "1C=" << C.Debug() << std::endl;
//std::cerr << "1A=" << A.Debug() << std::endl;
//std::cerr << "1B=" << B.Debug() << std::endl;
Matrix &ret = Prod(CublasHandler::GetHandle(), C, A, B, transA, transB);
//std::cerr << "2C=" << C.Debug() << std::endl;
return ret;
}
__global__ void gSoftMax(MatrixWrapper<float> out,
const VectorWrapper<uint> batchIdsWrap,
const VectorWrapper<uint> sentenceLengthsWrap,
uint shareSize)
{
extern __shared__ float _share[];
size_t numHypos = out.dim(0);
size_t maxLength = out.dim(1);
int hypoInd = blockIdx.x;
int origSrcPos = threadIdx.x;
while (hypoInd < numHypos) {
VectorWrapper<float> _max(_share, shareSize);
_max[origSrcPos] = out(hypoInd, origSrcPos, 0, 0);
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
float value = out(hypoInd, srcPos, 0, 0);
int batch = batchIdsWrap[hypoInd];
value *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0;
if (value > _max[origSrcPos]) {
_max[origSrcPos] = value;
}
}
}
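    // tree reduction in shared memory: fold the per-thread maxima down into _max[0]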
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
if(_max[origSrcPos + skip] > _max[origSrcPos])
_max[origSrcPos] = _max[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, shareSize);
_sum[origSrcPos] = 0.0f;
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos, 0, 0) = __expf(out(hypoInd, srcPos, 0, 0) - max);
int batch = batchIdsWrap[hypoInd];
out(hypoInd, srcPos, 0, 0) *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0; // sentencesMappingWrap(srcPos, batch, 0, 0);
_sum[origSrcPos] += out(hypoInd, srcPos, 0, 0);
}
}
__syncthreads();
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
_sum[origSrcPos] += _sum[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos, 0, 0) /= _sum[0];
}
}
__syncthreads();
hypoInd += gridDim.x;
}
}
Matrix& Softmax(Matrix& Out,
const mblas::Vector<uint>& batchIds,
const mblas::Vector<uint> &sentenceLengths,
size_t batchSize)
{
size_t maxLength = Out.dim(1);
MatrixWrapper<float> outWrap(Out);
const VectorWrapper<uint> batchIdsWrap(batchIds);
const VectorWrapper<uint> sentenceLengthsWrap(sentenceLengths);
int blocks = batchSize;
int threads = ::min(MAX_THREADS, (int)maxLength);
int shared = sizeof(float) * threads;
hipLaunchKernelGGL(( gSoftMax), dim3(blocks), dim3(threads), shared, CudaStreamHandler::GetStream(),
outWrap, batchIdsWrap, sentenceLengthsWrap, threads);
return Out;
}
__global__ void gLogSoftMax(MatrixWrapper<float> out, uint shareSize)
{
extern __shared__ float _share[];
size_t rows = out.dim(0);
size_t cols = out.dim(1);
int rowIdx = blockIdx.x;
while (rowIdx < rows) {
//float* _max = _share;
VectorWrapper<float> _max(_share, shareSize);
_max[threadIdx.x] = out(rowIdx, threadIdx.x, 0, 0);
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
const float &val = out(rowIdx, id, 0, 0);
if (val > _max[threadIdx.x]) {
_max[threadIdx.x] = val;
}
}
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x])
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, shareSize);
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = exp(row[id] - max);
float &val = out(rowIdx, id, 0, 0);
val = __expf(val - max);
_sum[threadIdx.x] += val;
}
}
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = log(row[id]/_sum[0]);
float &val = out(rowIdx, id, 0, 0);
val = __logf(val /_sum[0]);
}
}
__syncthreads();
rowIdx += gridDim.x;
}
}
Matrix& LogSoftmax(Matrix& Out)
{
MatrixWrapper<float> outWrap(Out);
int blocks = ::min(MAX_BLOCKS, (int)Out.dim(0));
int threads = ::min(MAX_THREADS, (int)Out.dim(1));
int shared = sizeof(float) * threads;
hipLaunchKernelGGL(( gLogSoftMax), dim3(blocks), dim3(threads), shared, CudaStreamHandler::GetStream(),
Out, threads);
return Out;
}
__global__ void gSetColumn(MatrixWrapper<float> in, int noColumn, float value) {
int n_rows = in.dim(0);
int rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
if (rowNumber < n_rows) {
in(rowNumber, noColumn, 0, 0) = value;
}
}
void SetColumn(Matrix& In, int noColumn, float value) {
int nRows = In.dim(0);
int nBlocks = nRows / MAX_THREADS + ((nRows % MAX_THREADS == 0) ? 0 : 1);
int nThreads = ::min(MAX_THREADS, nRows);
MatrixWrapper<float> inWrap(In);
hipLaunchKernelGGL(( gSetColumn), dim3(nBlocks), dim3(nThreads), 0, mblas::CudaStreamHandler::GetStream(),
inWrap, noColumn, value);
}
__global__ void gFill(MatrixWrapper<float> in, float val) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in.size()) {
in[index] = val;
}
}
void Fill(Matrix& In, float value) {
size_t size = In.size();
if (value) {
int nThreads = ::min(MAX_THREADS, (int)size);
int nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
MatrixWrapper<float> inWrap(In);
hipLaunchKernelGGL(( gFill), dim3(nBlocks), dim3(nThreads), 0, CudaStreamHandler::GetStream(),
inWrap, value);
}
else {
HANDLE_ERROR(hipMemsetAsync(In.data(), 0, size * sizeof(float), CudaStreamHandler::GetStream()));
}
}
__global__
void gMapMatrix(MatrixWrapper<float> in,
const VectorWrapper<uint> sentenceLengthsWrap,
int i)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < in.size()) {
int numCols = in.dim(1);
int batchIdx = tid / numCols;
int col = tid % numCols;
//in[tid] *= mappingWrap(i, batchIdx, 0, 0);
in(batchIdx, col, 0, 0) *= (i < sentenceLengthsWrap[batchIdx] ? 1 : 0);
}
}
void MapMatrix(Matrix& state,
const mblas::Vector<uint> &sentenceLengths,
size_t i)
{
// blank out rows in the state matrix where the word position i does not exist
// mapping is a concatenated array of 1 & 0 of each sentence in the batch to say whether word exists or not.
int batchSize = state.dim(0);
int stateLength = state.dim(1);
int numThreads = ::min((int)state.size(), MAX_THREADS);
int numBlocks = (state.size() / numThreads) + ((state.size() % numThreads == 0) ? 0 : 1);
MatrixWrapper<float> stateWrap(state);
VectorWrapper<uint> sentenceLengthsWrap(sentenceLengths);
hipLaunchKernelGGL(( gMapMatrix), dim3(numBlocks), dim3(numThreads), 0, CudaStreamHandler::GetStream(),
stateWrap, sentenceLengthsWrap, i);
/*
cerr << "nBlocks=" << numBlocks << endl;
cerr << "nThreads=" << numThreads << endl;
cerr << "stateWrap=" << stateWrap.Debug() << endl;
cerr << "mapping=" << Debug(mapping, 2) << endl;
cerr << "i=" << i << endl;
cerr << std::endl;
HANDLE_ERROR(hipDeviceSynchronize());
*/
}
__device__ uint getIndex(const dim3 &dim, const dim3 &val)
{
uint ret = dim.x * val.x + dim.y * val.y + dim.z * val.z;
return ret;
}
__global__ void gLNormalization(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
const MatrixWrapper<float> alphaWrap,
const MatrixWrapper<float> betaWrap,
float eps=0.00001)
{
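  // layer normalization over the feature (column) dimension: subtract the mean,
  // divide by sqrt(variance + eps), scale by alpha and optionally shift by beta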
extern __shared__ float _share[];
//printf("blockDim.x=%d gridDim.x=%d \n", blockDim.x, gridDim.x);
// blockDim.x=512 gridDim.x=1
int cols = in.dim(1);
assert(blockIdx.x < in.dim(0));
assert(blockIdx.y < in.dim(2));
assert(blockIdx.z < in.dim(3));
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
_sum[threadIdx.x] += in(blockIdx.x, id, blockIdx.y, blockIdx.z);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = in(blockIdx.x, id, blockIdx.y, blockIdx.z) - mean;
out(blockIdx.x, id, blockIdx.y, blockIdx.z) = ex;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float &val = out(blockIdx.x, id, blockIdx.y, blockIdx.z);
if (betaWrap.size()) {
val = alphaWrap[id] * (val / sigma) + betaWrap[id];
} else {
val = alphaWrap[id] * (val / sigma);
}
}
}
}
void Normalization(Matrix &out,
const Matrix &in,
const Matrix &alpha,
const Matrix *beta,
float eps)
{
assert(in.dim(0) < MAX_BLOCKS);
assert(in.dim(2) < MAX_BLOCKS);
assert(in.dim(3) < MAX_BLOCKS);
//out.Reshape(in.dim(0), in.dim(1), in.dim(2), in.dim(3));
int numThreads = ::min((uint) in.dim(1), (uint) MAX_THREADS);
dim3 numBlocks(in.dim(0), in.dim(2), in.dim(3));
int shared = numThreads * sizeof(float) * 2;
MatrixWrapper<float> outWrap(out);
const MatrixWrapper<float> inWrap(in);
const MatrixWrapper<float> alphaWrap(alpha);
MatrixWrapper<float> *betaWrap = beta ? new MatrixWrapper<float>(*beta) : new MatrixWrapper<float>();
hipLaunchKernelGGL(( gLNormalization), dim3(numBlocks), dim3(numThreads), shared, CudaStreamHandler::GetStream(),
outWrap, inWrap, alphaWrap, *betaWrap, eps);
/*
//std::cerr << "nBlocks=" << numBlocks << std::endl;
std::cerr << "nThreads=" << numThreads << std::endl;
std::cerr << "outWrap=" << outWrap.Debug() << std::endl;
std::cerr << "inWrap=" << inWrap.Debug() << std::endl;
std::cerr << "alphaWrap=" << alphaWrap.Debug() << std::endl;
std::cerr << "betaWrap=" << betaWrap->Debug() << std::endl;
std::cerr << std::endl;
HANDLE_ERROR(hipDeviceSynchronize());
*/
delete betaWrap;
}
void Normalization(Matrix& out, const Matrix& in, const Matrix& alpha, const Matrix& beta,
float eps)
{
Normalization(out, in, alpha, &beta, eps);
}
void Normalization(Matrix& out, const Matrix& in, const Matrix& alpha, float eps)
{
Normalization(out, in, alpha, nullptr, eps);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
#define LOWEST_FLOAT -1111111111111
#define HIGHEST_FLOAT +999999999999
__global__
void gBeamSizeInit(VectorWrapper<uint> hypo2BeamSizeWrap,
VectorWrapper<uint> batch2HypoWrap,
VectorWrapper<uint> hypo2CandidateWrap,
bool isFirst,
uint beamSizeSum,
const VectorWrapper<uint> beamSizesWrap)
{
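  // launched with a single block and a single thread (see LogSoftmaxAndNBest below);
  // serially fills the hypo -> beam size, hypo -> candidate offset and batch -> hypo
  // lookup tables, skipping batches whose beam is empty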
uint hypoInd = 0;
uint candidateInd = 0;
uint a = 0, b = 0;
//printf("beamSizesWrap.size()=%u \n", beamSizesWrap.size());
for (size_t batchInd = 0; batchInd < beamSizesWrap.size(); ++batchInd) {
uint beamSize = beamSizesWrap[batchInd];
/*
printf("batchInd=%u ", batchInd);
printf("beamSize=%u ", beamSize);
printf("a=%u ", a);
printf("b=%u \n", b);
*/
if (beamSize) {
if (isFirst) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = batchInd;
++b;
candidateInd += beamSize;
}
else {
for (size_t j = 0; j < beamSize; ++j) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
candidateInd += beamSize;
}
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = hypoInd;
++b;
}
hypoInd += beamSize;
}
}
}
__device__
float GetMaxScore(const MatrixWrapper<NthOutBatch> &nBestMatrix)
{
float ret = LOWEST_FLOAT;
for (uint i = 0; i < nBestMatrix.dim(1); ++i) {
const NthOutBatch &curr = nBestMatrix[i];
if (curr.score > ret) {
ret = curr.score;
}
}
return ret;
}
__device__
void AddElement(float &minScore,
uint &i,
NthOutBatch *arr,
bool forbidUNK,
uint vocabInd,
const NthOutBatch &ele)
{
const float score = ele.score;
if (forbidUNK && vocabInd == UNK_ID) {
arr[i].score = LOWEST_FLOAT;
minScore = LOWEST_FLOAT;
}
else {
arr[i] = ele;
if (score < minScore) {
minScore = score;
}
++i;
}
}
__device__
void MergeElement(float &minScore,
NthOutBatch *arr,
uint arrSize,
const NthOutBatch &ele)
{
float newMinScore = HIGHEST_FLOAT;
bool found = false;
for (uint i = 0; i < arrSize; ++i) {
NthOutBatch &currEle = arr[i];
if (!found && minScore == currEle.score) {
currEle = ele;
found = true;
}
// update min score
if (currEle.score < newMinScore) {
newMinScore = currEle.score;
}
}
minScore = newMinScore;
}
__device__
void MergeElement(float &minScore,
NthOutBatch *arr,
uint arrSize,
const NthOutBatch &ele,
bool forbidUNK,
uint vocabInd)
{
if (forbidUNK && vocabInd == UNK_ID) {
// do nothing
}
else if (ele.score > minScore) {
// replace element with min score
MergeElement(minScore, arr, arrSize, ele);
/*
printf("arrInd=%d ind=%d vocabId=%d \n",
arrInd,
_max[threadIdx.x].ind,
_max[threadIdx.x].vocabId);
*/
}
}
__device__
void NBestAndMax(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
float &topScore,
const MatrixWrapper<float> &in,
const MatrixWrapper<float> &b4Wrap,
uint hypoInd,
uint maxBeamSize,
bool forbidUNK,
const VectorWrapper<uint> &hypo2BeamSizeWrap,
const VectorWrapper<uint> &hypo2CandidateWrap)
{
extern __shared__ char _sharePtr[];
// placeholder for shared mem in subsequent function SumAndLogSoftMax
//MatrixWrapper<float> maxMatrix((float*)_sharePtr, blockDim.x, 1, 1, 1);
void *ptrOffset = _sharePtr + sizeof(float) * blockDim.x;
MatrixWrapper<NthOutBatch> nBestMatrix((NthOutBatch*)ptrOffset, blockDim.x, maxBeamSize, 1, 1);
NthOutBatch *arr = &nBestMatrix(threadIdx.x, 0, 0, 0);
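  // each thread keeps its own n-best list (beamSize entries) in shared memory;
  // the per-thread lists are merged pairwise in the reduction loop further down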
uint vocabSize = in.dim(1);
assert(hypoInd < hypo2BeamSizeWrap.size());
uint beamSize = hypo2BeamSizeWrap[hypoInd];
float minScore = HIGHEST_FLOAT;
// init
uint vocabInd = threadIdx.x;
uint i = 0;
while (vocabInd < vocabSize && i < beamSize) {
const float score = in(hypoInd, vocabInd, 0, 0) + b4Wrap(0, vocabInd, 0, 0);
uint arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
AddElement(minScore, i, arr, forbidUNK, vocabInd, ele);
vocabInd += blockDim.x;
}
// MAIN LOOP
while (vocabInd < vocabSize) {
const float score = in(hypoInd, vocabInd, 0, 0) + b4Wrap(0, vocabInd, 0, 0);
uint arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
MergeElement(minScore, arr, beamSize, ele, forbidUNK, vocabInd);
vocabInd += blockDim.x;
} // while (vocabInd < vocabSize) {
// merge nbest from different threads
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
NthOutBatch *dest = &nBestMatrix(threadIdx.x, 0, 0, 0);
for (uint i = 0; i < beamSize; ++i) {
const NthOutBatch &ele = nBestMatrix(threadIdx.x + skip, i, 0, 0);
if (ele.score > minScore) {
MergeElement(minScore, dest, beamSize, ele);
}
}
}
len = (len + 1) >> 1;
}
if (threadIdx.x == 0) {
__syncthreads();
// copy to output array
assert(hypoInd < hypo2CandidateWrap.size());
uint candidateInd = hypo2CandidateWrap[hypoInd];
for (uint i = 0; i < beamSize; ++i) {
const NthOutBatch &curr = nBestMatrix(0, i, 0, 0);
//printf("vocabInd=%u \n", best.vocabInd);
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestCandidatesWrap[candidateInd + i] = curr;
}
}
__syncthreads();
topScore = GetMaxScore(nBestMatrix);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__device__
void SumAndLogSoftMax(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
const MatrixWrapper<float> &in,
const MatrixWrapper<float> &b4Wrap,
uint hypoInd,
uint maxBeamSize,
float topScore,
const VectorWrapper<uint> &hypo2BeamSizeWrap,
const VectorWrapper<uint> &hypo2CandidateWrap)
{
extern __shared__ float _share[];
VectorWrapper<float> _sum(_share, blockDim.x);
size_t vocabSize = in.dim(1);
// calc sum
_sum[threadIdx.x] = 0.0f;
for (int id = threadIdx.x; id < vocabSize; id += blockDim.x) {
//row[id] = exp(row[id] - max);
float val = in(hypoInd, id, 0, 0) + b4Wrap(0, id, 0, 0);
val = __expf(val - topScore);
_sum[threadIdx.x] += val;
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
// apply partition and log to top
if (threadIdx.x == 0) {
__syncthreads();
//printf("val=%f %f \n", in(rowIdx, ele.vocabId, 0, 0), val);
// nbest
uint beamSize = hypo2BeamSizeWrap[hypoInd];
uint startPos = hypo2CandidateWrap[hypoInd];
for (uint i = 0; i < beamSize; ++i) {
//__syncthreads();
NthOutBatch &ele = nBestCandidatesWrap[startPos + i];
float &val = ele.score;
val = __expf(val - topScore);
val = __logf(val /_sum[0]);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gLogSoftMax(VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const MatrixWrapper<float> in,
const MatrixWrapper<float> b4Wrap,
uint maxBeamSize,
bool forbidUNK,
const VectorWrapper<uint> hypo2BeamSizeWrap,
const VectorWrapper<uint> hypo2CandidateWrap)
{
uint hypos = in.dim(0);
uint vocabSize = in.dim(1);
uint hypoInd = blockIdx.x; // index of previous hypo
while (hypoInd < hypos) {
float topScore;
NBestAndMax(nBestCandidatesWrap,
topScore,
in,
b4Wrap,
hypoInd,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
//__syncthreads();
SumAndLogSoftMax(nBestCandidatesWrap,
in,
b4Wrap,
hypoInd,
maxBeamSize,
topScore,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
__syncthreads();
hypoInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gNBestPerBatch(VectorWrapper<NthOutBatch> nBestWrap,
VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const MatrixWrapper<float> in,
const VectorWrapper<float> costsWrap,
uint maxBeamSize,
bool forbidUNK,
bool isFirst,
const VectorWrapper<uint> hypo2BeamSizeWrap,
const VectorWrapper<uint> batch2HypoWrap,
const VectorWrapper<uint> hypo2CandidateWrap)
{
//uint rows = in.dim(0);
uint batchSize = batch2HypoWrap.size();
uint batchInd = blockIdx.x;
while (batchInd < batchSize) {
assert(batchInd < batch2HypoWrap.size());
assert(batchInd < hypo2BeamSizeWrap.size());
assert(batchInd < nBestWrap.size());
uint hypoInd = batch2HypoWrap[batchInd];
uint beamSize = hypo2BeamSizeWrap[hypoInd];
assert(beamSize);
uint nextHypoInd;
if (isFirst) {
nextHypoInd = batchInd * beamSize;
}
else {
nextHypoInd = hypoInd;
}
    // candidates from the 1st hypo
float minScore = HIGHEST_FLOAT;
assert(hypoInd < hypo2CandidateWrap.size());
uint candidateInd = hypo2CandidateWrap[hypoInd];
for (uint i = 0; i < beamSize; ++i) {
float prevCost;
if (isFirst) {
assert(batchInd < costsWrap.size());
prevCost = costsWrap[batchInd];
}
else {
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert(hypoInd < costsWrap.size());
prevCost = costsWrap[hypoInd];
}
assert((nextHypoInd + i) < nBestWrap.size());
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestWrap[nextHypoInd + i] = nBestCandidatesWrap[candidateInd + i];
float &score = nBestWrap[nextHypoInd + i].score;
score += prevCost;
if (score < minScore) {
minScore = score;
}
}
// candidates from other previous hypos
if (!isFirst) {
for (uint hypoOffset = 1; hypoOffset < beamSize; ++hypoOffset) {
//printf("hypoInd=%d \n", (hypoInd + hypoOffset));
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert((hypoInd + hypoOffset) < costsWrap.size());
float prevCost = costsWrap[hypoInd + hypoOffset];
assert((hypoInd + hypoOffset) < hypo2CandidateWrap.size());
uint candidateInd = hypo2CandidateWrap[hypoInd + hypoOffset];
for (uint candidateOffset = 0; candidateOffset < beamSize; ++candidateOffset) {
assert((candidateInd + candidateOffset) < nBestCandidatesWrap.size());
NthOutBatch &candidate = nBestCandidatesWrap[candidateInd + candidateOffset];
candidate.score += prevCost;
assert(nextHypoInd < nBestWrap.size());
NthOutBatch *arr = &nBestWrap[nextHypoInd];
if (candidate.score > minScore) {
MergeElement(minScore, arr, beamSize, candidate);
}
}
}
}
batchInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
void LogSoftmaxAndNBest(mblas::Vector<NthOutBatch> &nBest,
const Matrix& in,
const Matrix& b4,
const mblas::Vector<float> &costs,
bool forbidUNK,
uint maxBeamSize,
const std::vector<uint>& beamSizes,
uint beamSizeSum,
bool isFirst)
{
//BEGIN_TIMER("LogSoftmax excl kernels");
//cerr << "in=" << in.Debug(0) << endl;
//cerr << "beamSizes=" << beamSizes.size() << endl;
// create beam size vectors on GPU but exclude empty beams
uint batchSize = 0;
uint candidateInd = 0;
for (size_t batchInd = 0; batchInd < beamSizes.size(); ++batchInd) {
uint beamSize = beamSizes[batchInd];
//cerr << "(" << beamSize << "," << hypoInd << ") ";
if (beamSize) {
if (isFirst) {
candidateInd += beamSize;
}
else {
candidateInd += beamSize * beamSize;
}
++batchSize;
}
}
mblas::Vector<uint> d_beamSizes(beamSizes);
mblas::Vector<uint> hypo2BeamSize(in.dim(0));
mblas::Vector<uint> hypo2Candidate(in.dim(0));
mblas::Vector<uint> batch2Hypo(batchSize);
mblas::Vector<NthOutBatch> nBestCandidates(candidateInd);
/*
cerr << "in=" << in.Debug(0) << endl;
cerr << "beamSizes=" << beamSizes.size() << endl;
cerr << "beamSizeSum=" << beamSizeSum << endl;
cerr << "batchSize=" << batchSize << endl;
cerr << "candidateInd=" << candidateInd << endl;
cerr << "hypo2BeamSize=" << Debug(hypo2BeamSize, 0) << endl;
cerr << "hypo2Candidate=" << Debug(hypo2Candidate, 0) << endl;
cerr << "batch2Hypo=" << Debug(batch2Hypo, 0) << endl;
cerr << "nBest=" << Debug(nBest, 0) << endl;
cerr << "nBestCandidates=" << Debug(nBestCandidates, 0) << endl;
cerr << endl;
*/
MatrixWrapper<float> inWrap(in);
MatrixWrapper<float> b4Wrap(b4);
VectorWrapper<uint> hypo2BeamSizeWrap(hypo2BeamSize);
VectorWrapper<uint> hypo2CandidateWrap(hypo2Candidate);
VectorWrapper<uint> batch2HypoWrap(batch2Hypo);
VectorWrapper<NthOutBatch> nBestWrap(nBest);
VectorWrapper<NthOutBatch> nBestCandidatesWrap(nBestCandidates);
VectorWrapper<float> costsWrap(costs);
VectorWrapper<uint> beamSizesWrap(d_beamSizes);
//PAUSE_TIMER("LogSoftmax excl kernels");
int blocks = ::min(MAX_BLOCKS, (int)in.dim(0));
int threads = ::min(MAX_THREADS, (int)in.dim(1));
int shared = sizeof(NthOutBatch) * threads * maxBeamSize
+ sizeof(float) * threads;
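  // shared memory per block: one n-best list (maxBeamSize entries) per thread used by
  // NBestAndMax, plus one float per thread for the max/sum reductions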
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step0" << endl;
//BEGIN_TIMER("gBeamSizeInit");
hipLaunchKernelGGL(( gBeamSizeInit), dim3(1), dim3(1), 0, CudaStreamHandler::GetStream(),
hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap,
isFirst,
beamSizeSum,
beamSizesWrap
);
//PAUSE_TIMER("gBeamSizeInit");
/*
cerr << "hypo2BeamSize=" << Debug(hypo2BeamSize, 2) << endl;
cerr << "hypo2Candidate=" << Debug(hypo2Candidate, 2) << endl;
cerr << "batch2Hypo=" << Debug(batch2Hypo, 2) << endl;
cerr << endl;
*/
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step1" << endl;
//BEGIN_TIMER("gLogSoftMax");
hipLaunchKernelGGL(( gLogSoftMax), dim3(blocks), dim3(threads), shared, CudaStreamHandler::GetStream(),
nBestCandidatesWrap,
inWrap,
b4Wrap,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
//PAUSE_TIMER("gLogSoftMax");
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step2" << endl;
//BEGIN_TIMER("gNBestPerBatch");
hipLaunchKernelGGL(( gNBestPerBatch), dim3(blocks), dim3(1), 0, CudaStreamHandler::GetStream(),
nBestWrap,
nBestCandidatesWrap,
inWrap,
costsWrap,
maxBeamSize,
forbidUNK,
isFirst,
hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap);
//PAUSE_TIMER("gNBestPerBatch");
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step3" << endl;
//cerr << "3costs=" << Debug(costs, 0) << endl;
}
void TestMemCpy()
{
using namespace std;
cerr << "Starting" << endl;
size_t NUM = 10;
vector<float> h_vec1(NUM);
for (size_t i = 0; i < NUM; ++i) {
h_vec1[i] = i * 3;
}
TestMemCpy(NUM, h_vec1.data());
cerr << "Finished" << endl;
}
} // namespace mblas
} // namespace GPU
} // namespace amunmt
| 5d61fb49cadbfe9f9697610f0dab17373f10a817.cu | #include "gpu/mblas/matrix_functions.h"
#include "gpu/mblas/handles.h"
using namespace std;
namespace amunmt {
namespace GPU {
namespace mblas {
thread_local CudaStreamHandler CudaStreamHandler::instance_;
thread_local CublasHandler CublasHandler::instance_;
Matrix& Swap(Matrix& Out, Matrix& In) {
Out.swap(In);
return Out;
}
__global__ void gMean(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
const VectorWrapper<uint> sentenceLengths)
{
// out = batches * states
// in = max sentence length * states * 1 * batches
// mapping = max length * batches
int id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("id = %d in = %lu %lu %lu %lu = %lu %lu \n", id, in.dim(0), in.dim(1), in.dim(2), in.dim(3), in.size(), sizeof(in));
if (id < out.size()) {
uint indices[SHAPE_SIZE];
out.id2Indices(id, indices);
//printf("%d -> %lu %lu %lu %lu \n", id, indices[0], indices[1], indices[2], indices[3]);
size_t batch = indices[0];
size_t state = indices[1];
float sum = 0.0f;
int counter = 0;
for (size_t row = 0; row < in.dim(0); ++row) {
bool isWord = row < sentenceLengths[batch];
//printf("batch=%lu startMapInd=%lu mapOffset=%lu -> %d \n", batch, startMapInd, mapOffset, isWord);
if (isWord) {
sum += in(row, state, 0, batch);
++counter;
}
}
sum /= (float) counter;
out[id] = sum;
}
}
void Mean(Matrix& Out,
const Matrix& In,
const mblas::Vector<uint> &sentenceLengths)
{
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
assert(Out.dim(0) == In.dim(3));
assert(Out.dim(1) == In.dim(1));
// mean of each ROW
size_t batchNum = Out.dim(0) * Out.dim(2) * Out.dim(3);
size_t stateLength = Out.dim(1);
size_t sentenceLength = (In.dim(0) * In.dim(2) * In.dim(3)) / batchNum;
MatrixWrapper<float> outWrap(Out);
MatrixWrapper<float> inWrap(In);
//cerr << "outWrap=" << outWrap.Debug() << endl;
VectorWrapper<uint> sentenceLengthsWrap(sentenceLengths);
uint size = outWrap.size();
uint threads = std::min((uint)MAX_THREADS, size);
uint blocks = (size / threads) + ((size % threads == 0) ? 0 : 1);
gMean<<<blocks, threads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, sentenceLengthsWrap);
}
__global__ void gWeightedMean(MatrixWrapper<float> out,
const MatrixWrapper<float> weights,
const MatrixWrapper<float> in,
const VectorWrapper<uint> mapping
)
{
int numHypos = weights.dim(0);
int states = in.dim(1);
int srcLen = weights.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < numHypos * states) {
int hypoInd = id / states;
int batchInd = mapping[hypoInd];
int stateInd = id % states;
//printf("hypoInd=%d batchInd=%d stateInd=%d \n", hypoInd, batchInd, stateInd);
float sum = 0.0f;
for (uint i = 0; i < srcLen; ++i) {
sum += weights(hypoInd, i, 0, 0) * in(i, stateInd, 0, batchInd);
}
out[id] = sum;
}
}
void WeightedMean(Matrix& Out,const Matrix& Weights, const Matrix& In, const mblas::Vector<uint>& mapping)
{
int numHypos = Weights.dim(0);
int states = In.dim(1);
Out.NewSize(numHypos, states);
MatrixWrapper<float> outWrap(Out);
MatrixWrapper<float> weightsWrap(Weights);
MatrixWrapper<float> inWrap(In);
VectorWrapper<uint> mappingWrap(mapping);
uint size = Out.size();
uint nThreads = std::min((uint) MAX_THREADS, (uint)size);
uint nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
gWeightedMean<<<nBlocks, nThreads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, weightsWrap, inWrap, mappingWrap);
/*
cerr << "nBlocks=" << nBlocks << endl;
cerr << "Out=" << outWrap.Debug() << endl;
cerr << "Weights=" << weightsWrap.Debug() << endl;
cerr << "In=" << inWrap.Debug() << endl;
cerr << "mapping=" << mapping.size() << endl;
for (size_t i = 0; i < mapping.size(); ++i) {
cerr << mapping[i] << " ";
}
cerr << endl << endl;
*/
}
Matrix& Transpose(Matrix& Out, const Matrix& In) {
size_t m = In.dim(0);
size_t n = In.dim(1);
Out.NewSize(n, m);
float alpha = 1.0;
float beta = 0.0;
cublasSgeam(CublasHandler::GetHandle(), CUBLAS_OP_T, CUBLAS_OP_T, m, n, &alpha, In.data(), n,
&beta, In.data(), n, Out.data(), m);
return Out;
}
Matrix& Transpose(Matrix& Out) {
thread_local Matrix Temp;
Transpose(Temp, Out);
Swap(Out, Temp);
return Out;
}
Matrix& Concat(Matrix& Out, const Matrix& In) {
size_t oldSize = Out.size();
Out.Resize(Out.dim(0) + In.dim(0), Out.dim(1));
mblas::copy(In.data(), In.size(), Out.data() + oldSize, cudaMemcpyDeviceToDevice);
return Out;
}
Matrix& Copy(Matrix& Out, const Matrix& In) {
Out.NewSize(In.dim(0), In.dim(1), In.dim(2), In.dim(3));
mblas::copy(In.data(), In.size(), Out.data(), cudaMemcpyDeviceToDevice);
return Out;
}
__global__ void gPasteRows(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
int rowNo, int colNo)
{
int inRows = in.dim(0);
int inCols = in.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < inRows * inCols) {
int outCols = out.dim(1);
int inRow = id / inCols;
int inCol = id % inCols;
//out[outID] = in[id];
out(rowNo, inCol + colNo, 0, inRow) = in(inRow, inCol, 0, 0);
}
}
void PasteRows(Matrix& Out, const Matrix& In, const size_t rowNo, size_t colNo)
{
MatrixWrapper<float> outWrap(Out);
MatrixWrapper<float> inWrap(In);
uint size = In.size();
uint nThreads = std::min((uint) MAX_THREADS, (uint)size);
uint nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
gPasteRows<<<nBlocks, nThreads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, rowNo, colNo);
}
Matrix& PasteRow(Matrix& Out,
const Matrix& In,
const size_t r, const size_t c)
{
size_t start = r * Out.dim(1) + c;
mblas::copy(In.data(), In.size(), Out.data() + start, cudaMemcpyDeviceToDevice);
return Out;
}
Matrix& CopyRow(Matrix& Out,
const Matrix& In,
const size_t r, const size_t c) {
size_t length = In.dim(1) - c;
Out.NewSize(1, length);
size_t start = r * In.dim(1) + c;
//size_t end = start + length;
//mblas::copy(In.begin() + start, In.begin() + end, Out.begin());
mblas::copy(In.data() + start, length , Out.data(), cudaMemcpyDeviceToDevice);
return Out;
}
__global__ void gCopyRows(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
const VectorWrapper<uint> indicesWrap)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < out.size()) {
uint dim[SHAPE_SIZE];
out.id2Indices(id, dim);
size_t indicesInd = dim[0];
size_t inRow =indicesWrap[indicesInd];
//printf("indicesInd:%d\n",indicesInd);
//printf("inRow:%d\n",inRow);
out(indicesInd, dim[1], 0, 0) = in(inRow, dim[1], 0, 0);
}
}
Matrix& CopyRows(Matrix& Out,
const Matrix& In,
const mblas::Vector<uint>& indices)
{
assert(In.dim(1) == Out.dim(1));
assert(Out.dim(0) == indices.size());
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
/*
cerr << "Out=" << Out.Debug(0) << endl;
cerr << "In=" << In.Debug(0) << endl;
cerr << "indices=" << Debug(indices, 2) << endl;
cerr << endl;
*/
size_t size = Out.size();
size_t numPairs = indices.size();
MatrixWrapper<float> outWrap(Out);
const MatrixWrapper<float> inWrap(In);
const VectorWrapper<uint> indicesWrap(indices);
//cerr << "size=" << size << endl;
uint threads = std::min((uint) MAX_THREADS, (uint)size);
uint blocks = size / threads + ((size % threads == 0) ? 0 : 1);
gCopyRows<<<blocks, threads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, indicesWrap);
return Out;
}
Matrix& Assemble(Matrix& Out,
const Matrix& In,
const mblas::Vector<uint>& indices) {
Out.NewSize(indices.size(), In.dim(1));
//cerr << "Assemble=" << Out.Debug() << " " << In.Debug() << indices.size() << endl;
CopyRows(Out, In, indices);
return Out;
}
__global__ void gSlice(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
size_t n, size_t dim)
{
size_t row = blockIdx.x;
size_t inCol = threadIdx.x + dim * n;
size_t outCol = threadIdx.x;
while (outCol < out.dim(1)) {
out(row, outCol, 0, 0) = in(row, inCol, 0, 0);
inCol += blockDim.x;
outCol += blockDim.x;
}
}
Matrix& Slice(Matrix& Out,
const Matrix& In,
size_t n, size_t dim)
{
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
Out.NewSize(In.dim(0), dim);
MatrixWrapper<float> outWrap(Out);
const MatrixWrapper<float> inWrap(In);
/*
cerr << "outWrap=" << outWrap.Debug() << endl;
cerr << "inWrap=" << inWrap.Debug() << endl;
cerr << "n=" << n << endl;
cerr << "dim=" << dim << endl;
cerr << endl;
*/
uint threads = std::min((uint)MAX_THREADS, (uint)dim);
uint blocks = In.dim(0);
gSlice<<<blocks, threads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, n, dim);
return Out;
}
Matrix& Prod(cublasHandle_t handle, Matrix& C, const Matrix& A, const Matrix& B,
bool transA, bool transB)
{
  assert((A.dim(2) == 1 && A.dim(3) == 1) || (B.dim(2) == 1 && B.dim(3) == 1));
Matrix::value_type alpha = 1.0;
Matrix::value_type beta = 0.0;
size_t m = A.dim(0) * A.dim(2) * A.dim(3);
size_t k = A.dim(1);
size_t mOut = A.dim(0);
size_t kOut = A.dim(1);
if(transA) {
std::swap(m, k);
std::swap(mOut, kOut);
}
size_t l = B.dim(0) * B.dim(2) * B.dim(3);
size_t n = B.dim(1);
size_t lOut = B.dim(0);
size_t nOut = B.dim(1);
if(transB) {
std::swap(l, n);
std::swap(lOut, nOut);
}
assert(k == l);
size_t lda = A.dim(1);
size_t ldb = B.dim(1);
size_t ldc = transB ? B.dim(0) * B.dim(2) * B.dim(3) : B.dim(1);
size_t dim2 = A.dim(2);
if (!transA && transB) {
// for GetAlignedSourceContext()
    assert(A.dim(2) == 1 && A.dim(3) == 1);
C.NewSize(nOut, B.dim(2), 1, 1);
}
else {
C.NewSize(mOut, nOut, A.dim(2) * B.dim(2), A.dim(3) * B.dim(3));
}
/*
cerr << "C=" << C.Debug(0) << endl;
cerr << "A=" << A.Debug(0) << endl;
cerr << "B=" << B.Debug(0) << endl;
cerr << "transA=" << transA << endl;
cerr << "transB=" << transB << endl;
cerr << endl;
*/
cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
/*
cublasStatus_t cublasSgemm(cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const float *alpha,
const float *A, int lda,
const float *B, int ldb,
const float *beta,
float *C, int ldc)
*/
cublasSgemm(handle, opB, opA,
n, m, k,
&alpha,
B.data(), ldb,
A.data(), lda,
&beta,
C.data(), ldc);
return C;
}
Matrix& Prod(Matrix& C, const Matrix& A, const Matrix& B,
bool transA, bool transB) {
//std::cerr << "1C=" << C.Debug() << std::endl;
//std::cerr << "1A=" << A.Debug() << std::endl;
//std::cerr << "1B=" << B.Debug() << std::endl;
Matrix &ret = Prod(CublasHandler::GetHandle(), C, A, B, transA, transB);
//std::cerr << "2C=" << C.Debug() << std::endl;
return ret;
}
__global__ void gSoftMax(MatrixWrapper<float> out,
const VectorWrapper<uint> batchIdsWrap,
const VectorWrapper<uint> sentenceLengthsWrap,
uint shareSize)
{
extern __shared__ float _share[];
size_t numHypos = out.dim(0);
size_t maxLength = out.dim(1);
int hypoInd = blockIdx.x;
int origSrcPos = threadIdx.x;
while (hypoInd < numHypos) {
VectorWrapper<float> _max(_share, shareSize);
_max[origSrcPos] = out(hypoInd, origSrcPos, 0, 0);
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
float value = out(hypoInd, srcPos, 0, 0);
int batch = batchIdsWrap[hypoInd];
value *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0;
if (value > _max[origSrcPos]) {
_max[origSrcPos] = value;
}
}
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
if(_max[origSrcPos + skip] > _max[origSrcPos])
_max[origSrcPos] = _max[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, shareSize);
_sum[origSrcPos] = 0.0f;
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos, 0, 0) = __expf(out(hypoInd, srcPos, 0, 0) - max);
int batch = batchIdsWrap[hypoInd];
out(hypoInd, srcPos, 0, 0) *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0; // sentencesMappingWrap(srcPos, batch, 0, 0);
_sum[origSrcPos] += out(hypoInd, srcPos, 0, 0);
}
}
__syncthreads();
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
_sum[origSrcPos] += _sum[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos, 0, 0) /= _sum[0];
}
}
__syncthreads();
hypoInd += gridDim.x;
}
}
Matrix& Softmax(Matrix& Out,
const mblas::Vector<uint>& batchIds,
const mblas::Vector<uint> &sentenceLengths,
size_t batchSize)
{
size_t maxLength = Out.dim(1);
MatrixWrapper<float> outWrap(Out);
const VectorWrapper<uint> batchIdsWrap(batchIds);
const VectorWrapper<uint> sentenceLengthsWrap(sentenceLengths);
int blocks = batchSize;
int threads = std::min(MAX_THREADS, (int)maxLength);
int shared = sizeof(float) * threads;
gSoftMax<<<blocks, threads, shared, CudaStreamHandler::GetStream()>>>
(outWrap, batchIdsWrap, sentenceLengthsWrap, threads);
return Out;
}
__global__ void gLogSoftMax(MatrixWrapper<float> out, uint shareSize)
{
extern __shared__ float _share[];
size_t rows = out.dim(0);
size_t cols = out.dim(1);
int rowIdx = blockIdx.x;
while (rowIdx < rows) {
//float* _max = _share;
VectorWrapper<float> _max(_share, shareSize);
_max[threadIdx.x] = out(rowIdx, threadIdx.x, 0, 0);
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
const float &val = out(rowIdx, id, 0, 0);
if (val > _max[threadIdx.x]) {
_max[threadIdx.x] = val;
}
}
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x])
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, shareSize);
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = exp(row[id] - max);
float &val = out(rowIdx, id, 0, 0);
val = __expf(val - max);
_sum[threadIdx.x] += val;
}
}
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = log(row[id]/_sum[0]);
float &val = out(rowIdx, id, 0, 0);
val = __logf(val /_sum[0]);
}
}
__syncthreads();
rowIdx += gridDim.x;
}
}
Matrix& LogSoftmax(Matrix& Out)
{
MatrixWrapper<float> outWrap(Out);
int blocks = std::min(MAX_BLOCKS, (int)Out.dim(0));
int threads = std::min(MAX_THREADS, (int)Out.dim(1));
int shared = sizeof(float) * threads;
gLogSoftMax<<<blocks, threads, shared, CudaStreamHandler::GetStream()>>>
(Out, threads);
return Out;
}
__global__ void gSetColumn(MatrixWrapper<float> in, int noColumn, float value) {
int n_rows = in.dim(0);
int rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
if (rowNumber < n_rows) {
in(rowNumber, noColumn, 0, 0) = value;
}
}
void SetColumn(Matrix& In, int noColumn, float value) {
int nRows = In.dim(0);
int nBlocks = nRows / MAX_THREADS + ((nRows % MAX_THREADS == 0) ? 0 : 1);
int nThreads = std::min(MAX_THREADS, nRows);
MatrixWrapper<float> inWrap(In);
gSetColumn<<<nBlocks, nThreads, 0, mblas::CudaStreamHandler::GetStream()>>>
(inWrap, noColumn, value);
}
__global__ void gFill(MatrixWrapper<float> in, float val) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in.size()) {
in[index] = val;
}
}
void Fill(Matrix& In, float value) {
size_t size = In.size();
if (value) {
int nThreads = std::min(MAX_THREADS, (int)size);
int nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
MatrixWrapper<float> inWrap(In);
gFill<<<nBlocks, nThreads, 0, CudaStreamHandler::GetStream()>>>
(inWrap, value);
}
else {
HANDLE_ERROR(cudaMemsetAsync(In.data(), 0, size * sizeof(float), CudaStreamHandler::GetStream()));
}
}
__global__
void gMapMatrix(MatrixWrapper<float> in,
const VectorWrapper<uint> sentenceLengthsWrap,
int i)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < in.size()) {
int numCols = in.dim(1);
int batchIdx = tid / numCols;
int col = tid % numCols;
//in[tid] *= mappingWrap(i, batchIdx, 0, 0);
in(batchIdx, col, 0, 0) *= (i < sentenceLengthsWrap[batchIdx] ? 1 : 0);
}
}
void MapMatrix(Matrix& state,
const mblas::Vector<uint> &sentenceLengths,
size_t i)
{
// blank out rows in the state matrix where the word position i does not exist
// mapping is a concatenated array of 1 & 0 of each sentence in the batch to say whether word exists or not.
int batchSize = state.dim(0);
int stateLength = state.dim(1);
int numThreads = std::min((int)state.size(), MAX_THREADS);
int numBlocks = (state.size() / numThreads) + ((state.size() % numThreads == 0) ? 0 : 1);
MatrixWrapper<float> stateWrap(state);
VectorWrapper<uint> sentenceLengthsWrap(sentenceLengths);
gMapMatrix<<<numBlocks, numThreads, 0, CudaStreamHandler::GetStream()>>>
(stateWrap, sentenceLengthsWrap, i);
/*
cerr << "nBlocks=" << numBlocks << endl;
cerr << "nThreads=" << numThreads << endl;
cerr << "stateWrap=" << stateWrap.Debug() << endl;
cerr << "mapping=" << Debug(mapping, 2) << endl;
cerr << "i=" << i << endl;
cerr << std::endl;
HANDLE_ERROR(cudaDeviceSynchronize());
*/
}
__device__ uint getIndex(const dim3 &dim, const dim3 &val)
{
uint ret = dim.x * val.x + dim.y * val.y + dim.z * val.z;
return ret;
}
__global__ void gLNormalization(MatrixWrapper<float> out,
const MatrixWrapper<float> in,
const MatrixWrapper<float> alphaWrap,
const MatrixWrapper<float> betaWrap,
float eps=0.00001)
{
extern __shared__ float _share[];
//printf("blockDim.x=%d gridDim.x=%d \n", blockDim.x, gridDim.x);
// blockDim.x=512 gridDim.x=1
int cols = in.dim(1);
assert(blockIdx.x < in.dim(0));
assert(blockIdx.y < in.dim(2));
assert(blockIdx.z < in.dim(3));
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
_sum[threadIdx.x] += in(blockIdx.x, id, blockIdx.y, blockIdx.z);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = in(blockIdx.x, id, blockIdx.y, blockIdx.z) - mean;
out(blockIdx.x, id, blockIdx.y, blockIdx.z) = ex;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float &val = out(blockIdx.x, id, blockIdx.y, blockIdx.z);
if (betaWrap.size()) {
val = alphaWrap[id] * (val / sigma) + betaWrap[id];
} else {
val = alphaWrap[id] * (val / sigma);
}
}
}
}
void Normalization(Matrix &out,
const Matrix &in,
const Matrix &alpha,
const Matrix *beta,
float eps)
{
assert(in.dim(0) < MAX_BLOCKS);
assert(in.dim(2) < MAX_BLOCKS);
assert(in.dim(3) < MAX_BLOCKS);
//out.Reshape(in.dim(0), in.dim(1), in.dim(2), in.dim(3));
int numThreads = std::min((uint) in.dim(1), (uint) MAX_THREADS);
dim3 numBlocks(in.dim(0), in.dim(2), in.dim(3));
int shared = numThreads * sizeof(float) * 2;
MatrixWrapper<float> outWrap(out);
const MatrixWrapper<float> inWrap(in);
const MatrixWrapper<float> alphaWrap(alpha);
MatrixWrapper<float> *betaWrap = beta ? new MatrixWrapper<float>(*beta) : new MatrixWrapper<float>();
gLNormalization<<<numBlocks, numThreads, shared, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, alphaWrap, *betaWrap, eps);
/*
//std::cerr << "nBlocks=" << numBlocks << std::endl;
std::cerr << "nThreads=" << numThreads << std::endl;
std::cerr << "outWrap=" << outWrap.Debug() << std::endl;
std::cerr << "inWrap=" << inWrap.Debug() << std::endl;
std::cerr << "alphaWrap=" << alphaWrap.Debug() << std::endl;
std::cerr << "betaWrap=" << betaWrap->Debug() << std::endl;
std::cerr << std::endl;
HANDLE_ERROR(cudaDeviceSynchronize());
*/
delete betaWrap;
}
void Normalization(Matrix& out, const Matrix& in, const Matrix& alpha, const Matrix& beta,
float eps)
{
Normalization(out, in, alpha, &beta, eps);
}
void Normalization(Matrix& out, const Matrix& in, const Matrix& alpha, float eps)
{
Normalization(out, in, alpha, nullptr, eps);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
#define LOWEST_FLOAT -1111111111111
#define HIGHEST_FLOAT +999999999999
__global__
void gBeamSizeInit(VectorWrapper<uint> hypo2BeamSizeWrap,
VectorWrapper<uint> batch2HypoWrap,
VectorWrapper<uint> hypo2CandidateWrap,
bool isFirst,
uint beamSizeSum,
const VectorWrapper<uint> beamSizesWrap)
{
uint hypoInd = 0;
uint candidateInd = 0;
uint a = 0, b = 0;
//printf("beamSizesWrap.size()=%u \n", beamSizesWrap.size());
for (size_t batchInd = 0; batchInd < beamSizesWrap.size(); ++batchInd) {
uint beamSize = beamSizesWrap[batchInd];
/*
printf("batchInd=%u ", batchInd);
printf("beamSize=%u ", beamSize);
printf("a=%u ", a);
printf("b=%u \n", b);
*/
if (beamSize) {
if (isFirst) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = batchInd;
++b;
candidateInd += beamSize;
}
else {
for (size_t j = 0; j < beamSize; ++j) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
candidateInd += beamSize;
}
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = hypoInd;
++b;
}
hypoInd += beamSize;
}
}
}
__device__
float GetMaxScore(const MatrixWrapper<NthOutBatch> &nBestMatrix)
{
float ret = LOWEST_FLOAT;
for (uint i = 0; i < nBestMatrix.dim(1); ++i) {
const NthOutBatch &curr = nBestMatrix[i];
if (curr.score > ret) {
ret = curr.score;
}
}
return ret;
}
__device__
void AddElement(float &minScore,
uint &i,
NthOutBatch *arr,
bool forbidUNK,
uint vocabInd,
const NthOutBatch &ele)
{
const float score = ele.score;
if (forbidUNK && vocabInd == UNK_ID) {
arr[i].score = LOWEST_FLOAT;
minScore = LOWEST_FLOAT;
}
else {
arr[i] = ele;
if (score < minScore) {
minScore = score;
}
++i;
}
}
__device__
void MergeElement(float &minScore,
NthOutBatch *arr,
uint arrSize,
const NthOutBatch &ele)
{
float newMinScore = HIGHEST_FLOAT;
bool found = false;
for (uint i = 0; i < arrSize; ++i) {
NthOutBatch &currEle = arr[i];
if (!found && minScore == currEle.score) {
currEle = ele;
found = true;
}
// update min score
if (currEle.score < newMinScore) {
newMinScore = currEle.score;
}
}
minScore = newMinScore;
}
__device__
void MergeElement(float &minScore,
NthOutBatch *arr,
uint arrSize,
const NthOutBatch &ele,
bool forbidUNK,
uint vocabInd)
{
if (forbidUNK && vocabInd == UNK_ID) {
// do nothing
}
else if (ele.score > minScore) {
// replace element with min score
MergeElement(minScore, arr, arrSize, ele);
/*
printf("arrInd=%d ind=%d vocabId=%d \n",
arrInd,
_max[threadIdx.x].ind,
_max[threadIdx.x].vocabId);
*/
}
}
__device__
void NBestAndMax(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
float &topScore,
const MatrixWrapper<float> &in,
const MatrixWrapper<float> &b4Wrap,
uint hypoInd,
uint maxBeamSize,
bool forbidUNK,
const VectorWrapper<uint> &hypo2BeamSizeWrap,
const VectorWrapper<uint> &hypo2CandidateWrap)
{
extern __shared__ char _sharePtr[];
// placeholder for shared mem in subsequent function SumAndLogSoftMax
//MatrixWrapper<float> maxMatrix((float*)_sharePtr, blockDim.x, 1, 1, 1);
void *ptrOffset = _sharePtr + sizeof(float) * blockDim.x;
MatrixWrapper<NthOutBatch> nBestMatrix((NthOutBatch*)ptrOffset, blockDim.x, maxBeamSize, 1, 1);
NthOutBatch *arr = &nBestMatrix(threadIdx.x, 0, 0, 0);
uint vocabSize = in.dim(1);
assert(hypoInd < hypo2BeamSizeWrap.size());
uint beamSize = hypo2BeamSizeWrap[hypoInd];
float minScore = HIGHEST_FLOAT;
// init
uint vocabInd = threadIdx.x;
uint i = 0;
while (vocabInd < vocabSize && i < beamSize) {
const float score = in(hypoInd, vocabInd, 0, 0) + b4Wrap(0, vocabInd, 0, 0);
uint arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
AddElement(minScore, i, arr, forbidUNK, vocabInd, ele);
vocabInd += blockDim.x;
}
// MAIN LOOP
while (vocabInd < vocabSize) {
const float score = in(hypoInd, vocabInd, 0, 0) + b4Wrap(0, vocabInd, 0, 0);
uint arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
MergeElement(minScore, arr, beamSize, ele, forbidUNK, vocabInd);
vocabInd += blockDim.x;
} // while (vocabInd < vocabSize) {
// merge nbest from different threads
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
NthOutBatch *dest = &nBestMatrix(threadIdx.x, 0, 0, 0);
for (uint i = 0; i < beamSize; ++i) {
const NthOutBatch &ele = nBestMatrix(threadIdx.x + skip, i, 0, 0);
if (ele.score > minScore) {
MergeElement(minScore, dest, beamSize, ele);
}
}
}
len = (len + 1) >> 1;
}
if (threadIdx.x == 0) {
__syncthreads();
// copy to output array
assert(hypoInd < hypo2CandidateWrap.size());
uint candidateInd = hypo2CandidateWrap[hypoInd];
for (uint i = 0; i < beamSize; ++i) {
const NthOutBatch &curr = nBestMatrix(0, i, 0, 0);
//printf("vocabInd=%u \n", best.vocabInd);
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestCandidatesWrap[candidateInd + i] = curr;
}
}
__syncthreads();
topScore = GetMaxScore(nBestMatrix);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
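// SumAndLogSoftMax: computes the softmax normaliser
// sum_v exp(score(v) - topScore) with a block reduction, then rewrites the
// scores stored in nBestCandidatesWrap for this hypothesis as
// log(exp(score - topScore) / sum), i.e. log-softmax values.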
__device__
void SumAndLogSoftMax(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
const MatrixWrapper<float> &in,
const MatrixWrapper<float> &b4Wrap,
uint hypoInd,
uint maxBeamSize,
float topScore,
const VectorWrapper<uint> &hypo2BeamSizeWrap,
const VectorWrapper<uint> &hypo2CandidateWrap)
{
extern __shared__ float _share[];
VectorWrapper<float> _sum(_share, blockDim.x);
size_t vocabSize = in.dim(1);
// calc sum
_sum[threadIdx.x] = 0.0f;
for (int id = threadIdx.x; id < vocabSize; id += blockDim.x) {
//row[id] = exp(row[id] - max);
float val = in(hypoInd, id, 0, 0) + b4Wrap(0, id, 0, 0);
val = __expf(val - topScore);
_sum[threadIdx.x] += val;
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
// apply partition and log to top
if (threadIdx.x == 0) {
__syncthreads();
//printf("val=%f %f \n", in(rowIdx, ele.vocabId, 0, 0), val);
// nbest
uint beamSize = hypo2BeamSizeWrap[hypoInd];
uint startPos = hypo2CandidateWrap[hypoInd];
for (uint i = 0; i < beamSize; ++i) {
//__syncthreads();
NthOutBatch &ele = nBestCandidatesWrap[startPos + i];
float &val = ele.score;
val = __expf(val - topScore);
val = __logf(val /_sum[0]);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gLogSoftMax(VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const MatrixWrapper<float> in,
const MatrixWrapper<float> b4Wrap,
uint maxBeamSize,
bool forbidUNK,
const VectorWrapper<uint> hypo2BeamSizeWrap,
const VectorWrapper<uint> hypo2CandidateWrap)
{
uint hypos = in.dim(0);
uint vocabSize = in.dim(1);
uint hypoInd = blockIdx.x; // index of previous hypo
while (hypoInd < hypos) {
float topScore;
NBestAndMax(nBestCandidatesWrap,
topScore,
in,
b4Wrap,
hypoInd,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
//__syncthreads();
SumAndLogSoftMax(nBestCandidatesWrap,
in,
b4Wrap,
hypoInd,
maxBeamSize,
topScore,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
__syncthreads();
hypoInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
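// gNBestPerBatch: for every batch, adds the accumulated path cost of each
// source hypothesis to its candidates and keeps the overall beamSize best in
// nBestWrap. The list is seeded with the first hypothesis' candidates; after
// the first step the candidates of the remaining hypotheses are merged in.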
__global__ void gNBestPerBatch(VectorWrapper<NthOutBatch> nBestWrap,
VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const MatrixWrapper<float> in,
const VectorWrapper<float> costsWrap,
uint maxBeamSize,
bool forbidUNK,
bool isFirst,
const VectorWrapper<uint> hypo2BeamSizeWrap,
const VectorWrapper<uint> batch2HypoWrap,
const VectorWrapper<uint> hypo2CandidateWrap)
{
//uint rows = in.dim(0);
uint batchSize = batch2HypoWrap.size();
uint batchInd = blockIdx.x;
while (batchInd < batchSize) {
assert(batchInd < batch2HypoWrap.size());
assert(batchInd < hypo2BeamSizeWrap.size());
assert(batchInd < nBestWrap.size());
uint hypoInd = batch2HypoWrap[batchInd];
uint beamSize = hypo2BeamSizeWrap[hypoInd];
assert(beamSize);
uint nextHypoInd;
if (isFirst) {
nextHypoInd = batchInd * beamSize;
}
else {
nextHypoInd = hypoInd;
}
// candidates from the 1st hypothesis
float minScore = HIGHEST_FLOAT;
assert(hypoInd < hypo2CandidateWrap.size());
uint candidateInd = hypo2CandidateWrap[hypoInd];
for (uint i = 0; i < beamSize; ++i) {
float prevCost;
if (isFirst) {
assert(batchInd < costsWrap.size());
prevCost = costsWrap[batchInd];
}
else {
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert(hypoInd < costsWrap.size());
prevCost = costsWrap[hypoInd];
}
assert((nextHypoInd + i) < nBestWrap.size());
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestWrap[nextHypoInd + i] = nBestCandidatesWrap[candidateInd + i];
float &score = nBestWrap[nextHypoInd + i].score;
score += prevCost;
if (score < minScore) {
minScore = score;
}
}
// candidates from other previous hypos
if (!isFirst) {
for (uint hypoOffset = 1; hypoOffset < beamSize; ++hypoOffset) {
//printf("hypoInd=%d \n", (hypoInd + hypoOffset));
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert((hypoInd + hypoOffset) < costsWrap.size());
float prevCost = costsWrap[hypoInd + hypoOffset];
assert((hypoInd + hypoOffset) < hypo2CandidateWrap.size());
uint candidateInd = hypo2CandidateWrap[hypoInd + hypoOffset];
for (uint candidateOffset = 0; candidateOffset < beamSize; ++candidateOffset) {
assert((candidateInd + candidateOffset) < nBestCandidatesWrap.size());
NthOutBatch &candidate = nBestCandidatesWrap[candidateInd + candidateOffset];
candidate.score += prevCost;
assert(nextHypoInd < nBestWrap.size());
NthOutBatch *arr = &nBestWrap[nextHypoInd];
if (candidate.score > minScore) {
MergeElement(minScore, arr, beamSize, candidate);
}
}
}
}
batchInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
void LogSoftmaxAndNBest(mblas::Vector<NthOutBatch> &nBest,
const Matrix& in,
const Matrix& b4,
const mblas::Vector<float> &costs,
bool forbidUNK,
uint maxBeamSize,
const std::vector<uint>& beamSizes,
uint beamSizeSum,
bool isFirst)
{
//BEGIN_TIMER("LogSoftmax excl kernels");
//cerr << "in=" << in.Debug(0) << endl;
//cerr << "beamSizes=" << beamSizes.size() << endl;
// create beam size vectors on GPU but exclude empty beams
uint batchSize = 0;
uint candidateInd = 0;
for (size_t batchInd = 0; batchInd < beamSizes.size(); ++batchInd) {
uint beamSize = beamSizes[batchInd];
//cerr << "(" << beamSize << "," << hypoInd << ") ";
if (beamSize) {
if (isFirst) {
candidateInd += beamSize;
}
else {
candidateInd += beamSize * beamSize;
}
++batchSize;
}
}
mblas::Vector<uint> d_beamSizes(beamSizes);
mblas::Vector<uint> hypo2BeamSize(in.dim(0));
mblas::Vector<uint> hypo2Candidate(in.dim(0));
mblas::Vector<uint> batch2Hypo(batchSize);
mblas::Vector<NthOutBatch> nBestCandidates(candidateInd);
/*
cerr << "in=" << in.Debug(0) << endl;
cerr << "beamSizes=" << beamSizes.size() << endl;
cerr << "beamSizeSum=" << beamSizeSum << endl;
cerr << "batchSize=" << batchSize << endl;
cerr << "candidateInd=" << candidateInd << endl;
cerr << "hypo2BeamSize=" << Debug(hypo2BeamSize, 0) << endl;
cerr << "hypo2Candidate=" << Debug(hypo2Candidate, 0) << endl;
cerr << "batch2Hypo=" << Debug(batch2Hypo, 0) << endl;
cerr << "nBest=" << Debug(nBest, 0) << endl;
cerr << "nBestCandidates=" << Debug(nBestCandidates, 0) << endl;
cerr << endl;
*/
MatrixWrapper<float> inWrap(in);
MatrixWrapper<float> b4Wrap(b4);
VectorWrapper<uint> hypo2BeamSizeWrap(hypo2BeamSize);
VectorWrapper<uint> hypo2CandidateWrap(hypo2Candidate);
VectorWrapper<uint> batch2HypoWrap(batch2Hypo);
VectorWrapper<NthOutBatch> nBestWrap(nBest);
VectorWrapper<NthOutBatch> nBestCandidatesWrap(nBestCandidates);
VectorWrapper<float> costsWrap(costs);
VectorWrapper<uint> beamSizesWrap(d_beamSizes);
//PAUSE_TIMER("LogSoftmax excl kernels");
int blocks = std::min(MAX_BLOCKS, (int)in.dim(0));
int threads = std::min(MAX_THREADS, (int)in.dim(1));
int shared = sizeof(NthOutBatch) * threads * maxBeamSize
+ sizeof(float) * threads;
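// shared memory layout (see NBestAndMax / SumAndLogSoftMax): the first
// sizeof(float) * threads bytes hold the per-thread softmax partial sums,
// the rest holds the per-thread n-best lists (threads * maxBeamSize
// NthOutBatch entries).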
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step0" << endl;
//BEGIN_TIMER("gBeamSizeInit");
gBeamSizeInit<<<1, 1, 0, CudaStreamHandler::GetStream()>>>
(hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap,
isFirst,
beamSizeSum,
beamSizesWrap
);
//PAUSE_TIMER("gBeamSizeInit");
/*
cerr << "hypo2BeamSize=" << Debug(hypo2BeamSize, 2) << endl;
cerr << "hypo2Candidate=" << Debug(hypo2Candidate, 2) << endl;
cerr << "batch2Hypo=" << Debug(batch2Hypo, 2) << endl;
cerr << endl;
*/
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step1" << endl;
//BEGIN_TIMER("gLogSoftMax");
gLogSoftMax<<<blocks, threads, shared, CudaStreamHandler::GetStream()>>>
(nBestCandidatesWrap,
inWrap,
b4Wrap,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
//PAUSE_TIMER("gLogSoftMax");
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step2" << endl;
//BEGIN_TIMER("gNBestPerBatch");
gNBestPerBatch<<<blocks, 1, 0, CudaStreamHandler::GetStream()>>>
(nBestWrap,
nBestCandidatesWrap,
inWrap,
costsWrap,
maxBeamSize,
forbidUNK,
isFirst,
hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap);
//PAUSE_TIMER("gNBestPerBatch");
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "step3" << endl;
//cerr << "3costs=" << Debug(costs, 0) << endl;
}
void TestMemCpy()
{
using namespace std;
cerr << "Starting" << endl;
size_t NUM = 10;
vector<float> h_vec1(NUM);
for (size_t i = 0; i < NUM; ++i) {
h_vec1[i] = i * 3;
}
TestMemCpy(NUM, h_vec1.data());
cerr << "Finished" << endl;
}
} // namespace mblas
} // namespace GPU
} // namespace amunmt
|
58b43f4ea2caf3d21b2cdf5279b84ce7f2708f24.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* ragged
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <hipcub/hipcub.hpp>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
namespace {
// will be used in RaggedShape::MaxSize(int32_t axis) to call
// hipcub::DeviceReduce::Max
struct RowSplitsDiff {
const int32_t *row_splits_data;
explicit RowSplitsDiff(const int32_t *row_splits)
: row_splits_data(row_splits) {}
// operator[] and operator+ are required by hipcub::DeviceReduce::Max
__device__ int32_t operator[](int32_t i) const {
return row_splits_data[i + 1] - row_splits_data[i];
}
__device__ RowSplitsDiff operator+(int32_t n) const {
RowSplitsDiff tmp(*this);
tmp.row_splits_data += n;
return tmp;
}
};
} // namespace
namespace std {
// value_type is required by hipcub::DeviceReduce::Max
template <>
struct iterator_traits<::RowSplitsDiff> {
typedef int32_t value_type;
};
} // namespace std
namespace k2 {
// Recursive function that prints (part of) a ragged shape.
// 0 <= begin_pos <= end_pos < shape.TotSize(axis).
void PrintRaggedShapePart(std::ostream &stream, const RaggedShape &shape,
int32_t axis, int32_t begin_pos, int32_t end_pos) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 &&
begin_pos <= end_pos && end_pos <= shape.TotSize(axis));
for (int32_t d = begin_pos; d < end_pos; ++d) {
if (axis == shape.NumAxes() - 1) {
stream << "x ";
} else {
stream << "[ ";
const int32_t *row_splits = shape.RowSplits(axis + 1).Data();
K2_DCHECK(d < shape.RowSplits(axis + 1).Dim());
int32_t row_start = row_splits[d], row_end = row_splits[d + 1];
PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end);
stream << "] ";
}
}
}
// prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values'
// are just the positions in the array, this is for readability.
std::ostream &operator<<(std::ostream &stream, const RaggedShape &shape) {
if (shape.Context()->GetDeviceType() != kCpu) {
return stream << shape.To(GetCpuContext());
} else {
bool print_warnings = false;
if (shape.Validate(print_warnings)) {
stream << "[ ";
PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0());
stream << "]";
return stream;
} else {
// For non-valid shapes, print the raw info.
stream << "Invalid RaggedShape: { ";
stream << " num-axes = " << shape.NumAxes();
for (int32_t i = 1; i < shape.NumAxes(); i++) {
const RaggedShapeLayer &layer = shape.Layers()[i - 1];
if (layer.row_splits.IsValid())
stream << " RowSplits(" << i << ")=" << layer.row_splits;
if (layer.row_ids.IsValid())
stream << "RowIds(" << i << ")=" << layer.row_ids;
stream << "cached_tot_size[" << i << "]=" << layer.cached_tot_size;
}
return stream << " }";
}
}
}
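// Reminder of the row_splits <-> row_ids duality used below: row_splits
// [ 0 2 3 3 ] describes rows of sizes 2, 1 and 0, and the corresponding
// row_ids are [ 0 0 1 ].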
Array1<int32_t> &RaggedShape::RowIds(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
RaggedShapeLayer &rsd = layers_[axis - 1];
auto &row_splits = rsd.row_splits;
auto &row_ids = rsd.row_ids;
// row_splits.Dim() must be >= 1 according to the definition of
// RaggedShapeLayer.
K2_CHECK_GE(row_splits.Dim(), 1);
if (!row_ids.IsValid()) {
if (rsd.cached_tot_size < 0)
rsd.cached_tot_size = row_splits[row_splits.Dim() - 1];
// create row_ids as it does not exist
row_ids = Array1<int32_t>(Context(), rsd.cached_tot_size);
const int32_t *row_splits_data = row_splits.Data();
int32_t *row_ids_data = row_ids.Data();
RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data,
row_ids.Dim(), row_ids_data);
}
return row_ids;
}
int32_t RaggedShape::MaxSize(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
const auto &row_splits = layers_[axis - 1].row_splits;
const int32_t num_rows = row_splits.Dim() - 1;
if (num_rows == 0) return 0;
const int32_t *row_splits_data = row_splits.Data();
ContextPtr c = Context();
if (c->GetDeviceType() == kCpu) {
int32_t max_value = 0;
for (int32_t i = 0; i < num_rows; ++i) {
int32_t value = row_splits_data[i + 1] - row_splits_data[i];
if (value > max_value) max_value = value;
}
return max_value;
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
::RowSplitsDiff row_splits_diff(row_splits_data);
Array1<int32_t> max_array(Context(), 1, 0);
int32_t *max_value = max_array.Data();
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// the first time is to determine temporary device storage requirements
K2_CUDA_SAFE_CALL(hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
void *deleter_context;
d_temp_storage = c->Allocate(temp_storage_bytes, &deleter_context);
K2_CUDA_SAFE_CALL(hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
c->Deallocate(d_temp_storage, deleter_context);
// this will convert to memory on CPU
return max_array[0];
}
}
RaggedShape RaggedShape::Index(int32_t axis, int32_t i,
int32_t *value_offset /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
// only support `axis == 0` for now
K2_CHECK_EQ(axis, 0);
K2_CHECK_GE(i, 0);
int32_t num_axes = NumAxes();
K2_CHECK_GT(num_axes, 2);
const auto &src_axes = Layers();
K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim());
if (i == 0 && Dim0() == 1) {
// Just remove first axis. Common case so we make it efficient.
std::vector<RaggedShapeLayer> ans_axes(src_axes.begin() + 1, src_axes.end());
if (value_offset) *value_offset = 0;
return RaggedShape(ans_axes, false);
}
int32_t idx_begin = (i != 0 ? src_axes[0].row_splits[i] : 0),
idx_end = src_axes[0].row_splits[i + 1];
std::vector<RaggedShapeLayer> axes(src_axes.size() - 1);
ContextPtr c = Context();
for (int32_t i = 2; i < num_axes; ++i) {
const Array1<int32_t> &src_row_splits = RowSplits(i),
&src_row_ids = RowIds(i);
int32_t idx_begin_next = (idx_begin != 0 ? src_row_splits[idx_begin] : 0),
idx_end_next = src_row_splits[idx_end];
axes[i - 2].row_splits =
src_row_splits.Range(idx_begin, idx_end - idx_begin + 1);
if (idx_begin_next != 0)
axes[i - 2].row_splits = Minus(axes[i - 2].row_splits, idx_begin_next);
axes[i - 2].row_ids =
src_row_ids.Range(idx_begin_next, idx_end_next - idx_begin_next);
if (idx_begin != 0)
axes[i - 2].row_ids = Minus(axes[i - 2].row_ids, idx_begin);
axes[i - 2].cached_tot_size = idx_end_next - idx_begin_next;
idx_begin = idx_begin_next;
idx_end = idx_end_next;
}
if (value_offset) *value_offset = idx_begin;
return RaggedShape(axes);
}
void RaggedShape::Populate() {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = NumAxes();
ParallelRunner pr(this->Context());
for (int32_t i = 1; i < num_axes; ++i) {
With w(pr.NewStream());
// ignore return values of the following calls.
this->TotSize(i);
this->RowIds(i);
}
}
RaggedShape RaggedShape::To(ContextPtr ctx) const {
NVTX_RANGE(K2_FUNC);
if (ctx->IsCompatible(*Context())) return *this;
std::vector<RaggedShapeLayer> axes(layers_.size());
int32_t num_axes = NumAxes();
for (int32_t i = 1; i < num_axes; ++i) {
axes[i - 1].row_splits = layers_[i - 1].row_splits.To(ctx);
// leave row_ids and cached_tot_size unset
axes[i - 1].cached_tot_size = -1;
}
return RaggedShape(axes);
}
RaggedShapeIndexIterator RaggedShape::Iterator() {
return RaggedShapeIndexIterator(*this);
}
int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(static_cast<int32_t>(indexes.size()), NumAxes());
K2_CHECK_EQ(Context()->GetDeviceType(), kCpu);
int32_t cur_idx = indexes[0];
for (int32_t i = 1; i < NumAxes(); i++) {
Array1<int32_t> &row_splits = layers_[i - 1].row_splits;
K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim());
cur_idx = row_splits[cur_idx];
cur_idx += indexes[i];
}
return cur_idx;
}
int32_t RaggedShape::TotSize(int32_t axis) const {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(axis, 0);
K2_CHECK_LT(axis, NumAxes());
if (axis == 0)
return Dim0();
else {
const RaggedShapeLayer &rsd = layers_[axis - 1];
if (rsd.cached_tot_size >= 0) {
return rsd.cached_tot_size;
} else {
// if we had row_ids set up, we should have set cached_tot_size.
K2_CHECK_EQ(rsd.row_ids.Dim(), 0);
K2_CHECK_GT(rsd.row_splits.Dim(), 0);
const_cast<RaggedShapeLayer &>(rsd).cached_tot_size = rsd.row_splits.Back();
return rsd.cached_tot_size;
}
}
}
// TODO(dan): change this so that on error it prints a warning if
// print_warnings==true, and then returns false.
bool RaggedShape::Validate(bool print_warnings) const {
NVTX_RANGE(K2_FUNC);
ContextPtr c = Context();
int32_t num_axes = layers_.size();
ParallelRunner pr(c);
for (int32_t axis = 0; axis < num_axes; ++axis) {
With w(pr.NewStream());
const RaggedShapeLayer &rsd = layers_[axis];
K2_CHECK_GE(rsd.row_splits.Dim(), 0);
if (rsd.cached_tot_size >= 0) {
if (!(rsd.row_splits.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_splits.Back())) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_splits.Back()="
<< rsd.row_splits.Back()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
if (!((rsd.row_ids.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_ids.Dim()))) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, row_ids.Dim()="
<< rsd.row_ids.Dim()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
} else {
if (rsd.cached_tot_size != -1 || rsd.row_ids.Dim() != 0) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, cached_tot_size="
<< rsd.cached_tot_size
<< ", row-ids.Dim()=" << rsd.row_ids.Dim();
return false;
}
}
int32_t num_elems;
// Check row_splits.
{
// meta[0] is a bool, ok == 1, not-ok == 0.
// meta[1] will contain the total number of elements, i.e. row_splits[num_rows].
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data();
int32_t num_rows = rsd.row_splits.Dim() - 1;
K2_EVAL(
c, num_rows + 1, lambda_check_row_splits, (int32_t i)->void {
int32_t this_idx = row_splits_data[i];
if (i == 0 && this_idx != 0) *ok_data = 0;
if (i < num_rows) {
int32_t next_idx = row_splits_data[i + 1];
if (next_idx < this_idx) *ok_data = 0;
} else {
K2_CHECK(i == num_rows);
*num_elems_data = this_idx;
}
});
meta = meta.To(GetCpuContext());
num_elems = meta[1];
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits;
}
if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits[-1] = " << num_elems
<< " but cached_tot_size == " << rsd.cached_tot_size;
}
}
if (axis + 1 < num_axes) {
int32_t next_num_rows = layers_[axis + 1].row_splits.Dim() - 1;
if (num_elems != next_num_rows) {
K2_LOG(FATAL) << "Ragged shape has num_elems for layers_[" << axis
<< "] == " << num_elems << " and num-rows for layers_["
<< (axis + 1) << "] == " << next_num_rows;
}
}
if (rsd.row_ids.Dim() != 0) { // check row_ids.
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits));
// 1st elem is `ok` (1 or 0); 2nd elem is location of bad index
// into row_splits
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data(),
*row_ids_data = rsd.row_ids.Data();
int32_t num_elems_from_row_ids = rsd.row_ids.Dim(),
num_rows = rsd.row_splits.Dim() - 1;
K2_CHECK_EQ(num_elems, num_elems_from_row_ids);
// TODO: could do this and the other one in separate streams.
K2_EVAL(
c, num_elems, lambda_check_row_ids, (int32_t i)->void {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1]) {
*ok_data = 0;
*bad_index_data = i;
}
});
meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should
// be faster.
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-ids: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits
<< ", row_ids = " << rsd.row_ids << ", see index "
<< meta[1] << " of row_ids, whose dim is "
<< rsd.row_ids.Dim();
}
}
if (axis + 1 < layers_.size()) {
K2_CHECK(IsCompatible(rsd.row_splits, layers_[axis + 1].row_splits));
}
}
return true;
}
bool Equal(RaggedShape &a, RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumAxes() != b.NumAxes()) return false;
for (int32_t i = 1; i < a.NumAxes(); i++) {
if (a.RowSplits(i).Dim() != b.RowSplits(i).Dim() ||
!Equal(a.RowSplits(i), b.RowSplits(i)))
return false;
}
return true;
}
std::istream &operator>>(std::istream &is, RaggedShape &shape) {
NVTX_RANGE(K2_FUNC);
// Note: element 0 of 'row_splits' will end up being
// discarded; the others will become the axes of `shape`.
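// For example, reading "[ [ x x ] [ x ] ]" yields a 2-axis shape whose
// row_splits are [ 0 2 3 ].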
std::vector<std::vector<int32_t>> row_splits;
int32_t cur_level = 0, num_elems = 0;
while (1) {
is >> std::ws; // eat whitespace
if (!is.good()) {
is.setstate(std::ios::failbit);
return is;
}
int c = is.get();
if (c == static_cast<int32_t>('[')) {
cur_level++;
while (row_splits.size() < static_cast<size_t>(cur_level)) {
if (num_elems != 0) {
is.setstate(std::ios::failbit);
return is;
}
row_splits.push_back(std::vector<int32_t>(1, 0));
}
} else if (c == static_cast<int32_t>(']')) {
cur_level--;
if (cur_level <= 0) { // Done; return...
if (cur_level < 0) { // ']' without '['.
is.setstate(std::ios::failbit);
return is;
}
row_splits.erase(row_splits.begin());
if (row_splits.empty()) {
// Assume 2 axes even though the num-axes is ambiguous from the input.
// row_splits is 0 0.
row_splits.push_back(std::vector<int32_t>(1, 0));
}
std::vector<RaggedShapeLayer> axes(row_splits.size());
for (size_t i = 0; i < row_splits.size(); i++) {
axes[i].row_splits = Array1<int32_t>(GetCpuContext(), row_splits[i]);
axes[i].cached_tot_size = -1;
}
shape = RaggedShape(axes);
return is;
}
row_splits[cur_level].push_back(
(cur_level + 1 >= row_splits.size())
? num_elems
: (row_splits[cur_level + 1].size() - 1));
} else if (c == static_cast<int32_t>('x')) {
if (cur_level != static_cast<int32_t>(row_splits.size()) ||
cur_level < 2) {
is.setstate(std::ios::failbit);
return is;
}
num_elems++;
} else {
is.setstate(std::ios::failbit);
return is;
}
}
}
} // namespace k2
| 58b43f4ea2caf3d21b2cdf5279b84ce7f2708f24.cu | /**
* @brief
* ragged
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <cub/cub.cuh>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
namespace {
// will be used in RaggedShape::MaxSize(int32_t axis) to call
// cub::DeviceReduce::Max
struct RowSplitsDiff {
const int32_t *row_splits_data;
explicit RowSplitsDiff(const int32_t *row_splits)
: row_splits_data(row_splits) {}
// operator[] and operator+ are required by cub::DeviceReduce::Max
__device__ int32_t operator[](int32_t i) const {
return row_splits_data[i + 1] - row_splits_data[i];
}
__device__ RowSplitsDiff operator+(int32_t n) const {
RowSplitsDiff tmp(*this);
tmp.row_splits_data += n;
return tmp;
}
};
} // namespace
namespace std {
// value_type is required by cub::DeviceReduce::Max
template <>
struct iterator_traits<::RowSplitsDiff> {
typedef int32_t value_type;
};
} // namespace std
namespace k2 {
// Recursive function that prints (part of) a ragged shape.
// 0 <= begin_pos <= end_pos < shape.TotSize(axis).
void PrintRaggedShapePart(std::ostream &stream, const RaggedShape &shape,
int32_t axis, int32_t begin_pos, int32_t end_pos) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 &&
begin_pos <= end_pos && end_pos <= shape.TotSize(axis));
for (int32_t d = begin_pos; d < end_pos; ++d) {
if (axis == shape.NumAxes() - 1) {
stream << "x ";
} else {
stream << "[ ";
const int32_t *row_splits = shape.RowSplits(axis + 1).Data();
K2_DCHECK(d < shape.RowSplits(axis + 1).Dim());
int32_t row_start = row_splits[d], row_end = row_splits[d + 1];
PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end);
stream << "] ";
}
}
}
// prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values'
// are just the positions in the array, this is for readability.
std::ostream &operator<<(std::ostream &stream, const RaggedShape &shape) {
if (shape.Context()->GetDeviceType() != kCpu) {
return stream << shape.To(GetCpuContext());
} else {
bool print_warnings = false;
if (shape.Validate(print_warnings)) {
stream << "[ ";
PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0());
stream << "]";
return stream;
} else {
// For non-valid shapes, print the raw info.
stream << "Invalid RaggedShape: { ";
stream << " num-axes = " << shape.NumAxes();
for (int32_t i = 1; i < shape.NumAxes(); i++) {
const RaggedShapeLayer &layer = shape.Layers()[i - 1];
if (layer.row_splits.IsValid())
stream << " RowSplits(" << i << ")=" << layer.row_splits;
if (layer.row_ids.IsValid())
stream << "RowIds(" << i << ")=" << layer.row_ids;
stream << "cached_tot_size[" << i << "]=" << layer.cached_tot_size;
}
return stream << " }";
}
}
}
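// Reminder of the row_splits <-> row_ids duality used below: row_splits
// [ 0 2 3 3 ] describes rows of sizes 2, 1 and 0, and the corresponding
// row_ids are [ 0 0 1 ].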
Array1<int32_t> &RaggedShape::RowIds(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
RaggedShapeLayer &rsd = layers_[axis - 1];
auto &row_splits = rsd.row_splits;
auto &row_ids = rsd.row_ids;
// row_splits.Dim() must be >= 1 according to the definition of
// RaggedShapeLayer.
K2_CHECK_GE(row_splits.Dim(), 1);
if (!row_ids.IsValid()) {
if (rsd.cached_tot_size < 0)
rsd.cached_tot_size = row_splits[row_splits.Dim() - 1];
// create row_ids as it does not exist
row_ids = Array1<int32_t>(Context(), rsd.cached_tot_size);
const int32_t *row_splits_data = row_splits.Data();
int32_t *row_ids_data = row_ids.Data();
RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data,
row_ids.Dim(), row_ids_data);
}
return row_ids;
}
int32_t RaggedShape::MaxSize(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
const auto &row_splits = layers_[axis - 1].row_splits;
const int32_t num_rows = row_splits.Dim() - 1;
if (num_rows == 0) return 0;
const int32_t *row_splits_data = row_splits.Data();
ContextPtr c = Context();
if (c->GetDeviceType() == kCpu) {
int32_t max_value = 0;
for (int32_t i = 0; i < num_rows; ++i) {
int32_t value = row_splits_data[i + 1] - row_splits_data[i];
if (value > max_value) max_value = value;
}
return max_value;
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
::RowSplitsDiff row_splits_diff(row_splits_data);
Array1<int32_t> max_array(Context(), 1, 0);
int32_t *max_value = max_array.Data();
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// the first time is to determine temporary device storage requirements
K2_CUDA_SAFE_CALL(cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
void *deleter_context;
d_temp_storage = c->Allocate(temp_storage_bytes, &deleter_context);
K2_CUDA_SAFE_CALL(cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
c->Deallocate(d_temp_storage, deleter_context);
// this will convert to memory on CPU
return max_array[0];
}
}
RaggedShape RaggedShape::Index(int32_t axis, int32_t i,
int32_t *value_offset /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
// only support `axis == 0` for now
K2_CHECK_EQ(axis, 0);
K2_CHECK_GE(i, 0);
int32_t num_axes = NumAxes();
K2_CHECK_GT(num_axes, 2);
const auto &src_axes = Layers();
K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim());
if (i == 0 && Dim0() == 1) {
// Just remove first axis. Common case so we make it efficient.
std::vector<RaggedShapeLayer> ans_axes(src_axes.begin() + 1, src_axes.end());
if (value_offset) *value_offset = 0;
return RaggedShape(ans_axes, false);
}
int32_t idx_begin = (i != 0 ? src_axes[0].row_splits[i] : 0),
idx_end = src_axes[0].row_splits[i + 1];
std::vector<RaggedShapeLayer> axes(src_axes.size() - 1);
ContextPtr c = Context();
for (int32_t i = 2; i < num_axes; ++i) {
const Array1<int32_t> &src_row_splits = RowSplits(i),
&src_row_ids = RowIds(i);
int32_t idx_begin_next = (idx_begin != 0 ? src_row_splits[idx_begin] : 0),
idx_end_next = src_row_splits[idx_end];
axes[i - 2].row_splits =
src_row_splits.Range(idx_begin, idx_end - idx_begin + 1);
if (idx_begin_next != 0)
axes[i - 2].row_splits = Minus(axes[i - 2].row_splits, idx_begin_next);
axes[i - 2].row_ids =
src_row_ids.Range(idx_begin_next, idx_end_next - idx_begin_next);
if (idx_begin != 0)
axes[i - 2].row_ids = Minus(axes[i - 2].row_ids, idx_begin);
axes[i - 2].cached_tot_size = idx_end_next - idx_begin_next;
idx_begin = idx_begin_next;
idx_end = idx_end_next;
}
if (value_offset) *value_offset = idx_begin;
return RaggedShape(axes);
}
void RaggedShape::Populate() {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = NumAxes();
ParallelRunner pr(this->Context());
for (int32_t i = 1; i < num_axes; ++i) {
With w(pr.NewStream());
// ignore return values of the following calls.
this->TotSize(i);
this->RowIds(i);
}
}
RaggedShape RaggedShape::To(ContextPtr ctx) const {
NVTX_RANGE(K2_FUNC);
if (ctx->IsCompatible(*Context())) return *this;
std::vector<RaggedShapeLayer> axes(layers_.size());
int32_t num_axes = NumAxes();
for (int32_t i = 1; i < num_axes; ++i) {
axes[i - 1].row_splits = layers_[i - 1].row_splits.To(ctx);
// leave row_ids and cached_tot_size unset
axes[i - 1].cached_tot_size = -1;
}
return RaggedShape(axes);
}
RaggedShapeIndexIterator RaggedShape::Iterator() {
return RaggedShapeIndexIterator(*this);
}
int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(static_cast<int32_t>(indexes.size()), NumAxes());
K2_CHECK_EQ(Context()->GetDeviceType(), kCpu);
int32_t cur_idx = indexes[0];
for (int32_t i = 1; i < NumAxes(); i++) {
Array1<int32_t> &row_splits = layers_[i - 1].row_splits;
K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim());
cur_idx = row_splits[cur_idx];
cur_idx += indexes[i];
}
return cur_idx;
}
int32_t RaggedShape::TotSize(int32_t axis) const {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(axis, 0);
K2_CHECK_LT(axis, NumAxes());
if (axis == 0)
return Dim0();
else {
const RaggedShapeLayer &rsd = layers_[axis - 1];
if (rsd.cached_tot_size >= 0) {
return rsd.cached_tot_size;
} else {
// if we had row_ids set up, we should have set cached_tot_size.
K2_CHECK_EQ(rsd.row_ids.Dim(), 0);
K2_CHECK_GT(rsd.row_splits.Dim(), 0);
const_cast<RaggedShapeLayer &>(rsd).cached_tot_size = rsd.row_splits.Back();
return rsd.cached_tot_size;
}
}
}
// TODO(dan): change this so that on error it prints a warning if
// print_warnings==true, and then returns false.
bool RaggedShape::Validate(bool print_warnings) const {
NVTX_RANGE(K2_FUNC);
ContextPtr c = Context();
int32_t num_axes = layers_.size();
ParallelRunner pr(c);
for (int32_t axis = 0; axis < num_axes; ++axis) {
With w(pr.NewStream());
const RaggedShapeLayer &rsd = layers_[axis];
K2_CHECK_GE(rsd.row_splits.Dim(), 0);
if (rsd.cached_tot_size >= 0) {
if (!(rsd.row_splits.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_splits.Back())) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_splits.Back()="
<< rsd.row_splits.Back()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
if (!((rsd.row_ids.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_ids.Dim()))) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, row_ids.Dim()="
<< rsd.row_ids.Dim()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
} else {
if (rsd.cached_tot_size != -1 || rsd.row_ids.Dim() != 0) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, cached_tot_size="
<< rsd.cached_tot_size
<< ", row-ids.Dim()=" << rsd.row_ids.Dim();
return false;
}
}
int32_t num_elems;
// Check row_splits.
{
// meta[0] is a bool, ok == 1, not-ok == 0.
// meta[1] will contain the total number of elements, i.e. row_splits[num_rows].
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data();
int32_t num_rows = rsd.row_splits.Dim() - 1;
K2_EVAL(
c, num_rows + 1, lambda_check_row_splits, (int32_t i)->void {
int32_t this_idx = row_splits_data[i];
if (i == 0 && this_idx != 0) *ok_data = 0;
if (i < num_rows) {
int32_t next_idx = row_splits_data[i + 1];
if (next_idx < this_idx) *ok_data = 0;
} else {
K2_CHECK(i == num_rows);
*num_elems_data = this_idx;
}
});
meta = meta.To(GetCpuContext());
num_elems = meta[1];
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits;
}
if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits[-1] = " << num_elems
<< " but cached_tot_size == " << rsd.cached_tot_size;
}
}
if (axis + 1 < num_axes) {
int32_t next_num_rows = layers_[axis + 1].row_splits.Dim() - 1;
if (num_elems != next_num_rows) {
K2_LOG(FATAL) << "Ragged shape has num_elems for layers_[" << axis
<< "] == " << num_elems << " and num-rows for layers_["
<< (axis + 1) << "] == " << next_num_rows;
}
}
if (rsd.row_ids.Dim() != 0) { // check row_ids.
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits));
// 1st elem is `ok` (1 or 0); 2nd elem is location of bad index
// into row_splits
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data(),
*row_ids_data = rsd.row_ids.Data();
int32_t num_elems_from_row_ids = rsd.row_ids.Dim(),
num_rows = rsd.row_splits.Dim() - 1;
K2_CHECK_EQ(num_elems, num_elems_from_row_ids);
// TODO: could do this and the other one in separate streams.
K2_EVAL(
c, num_elems, lambda_check_row_ids, (int32_t i)->void {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1]) {
*ok_data = 0;
*bad_index_data = i;
}
});
meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should
// be faster.
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-ids: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits
<< ", row_ids = " << rsd.row_ids << ", see index "
<< meta[1] << " of row_ids, whose dim is "
<< rsd.row_ids.Dim();
}
}
if (axis + 1 < layers_.size()) {
K2_CHECK(IsCompatible(rsd.row_splits, layers_[axis + 1].row_splits));
}
}
return true;
}
bool Equal(RaggedShape &a, RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumAxes() != b.NumAxes()) return false;
for (int32_t i = 1; i < a.NumAxes(); i++) {
if (a.RowSplits(i).Dim() != b.RowSplits(i).Dim() ||
!Equal(a.RowSplits(i), b.RowSplits(i)))
return false;
}
return true;
}
std::istream &operator>>(std::istream &is, RaggedShape &shape) {
NVTX_RANGE(K2_FUNC);
// Note: element 0 of 'row_splits' will end up being
// discarded; the others will become the axes of `shape`.
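// For example, reading "[ [ x x ] [ x ] ]" yields a 2-axis shape whose
// row_splits are [ 0 2 3 ].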
std::vector<std::vector<int32_t>> row_splits;
int32_t cur_level = 0, num_elems = 0;
while (1) {
is >> std::ws; // eat whitespace
if (!is.good()) {
is.setstate(std::ios::failbit);
return is;
}
int c = is.get();
if (c == static_cast<int32_t>('[')) {
cur_level++;
while (row_splits.size() < static_cast<size_t>(cur_level)) {
if (num_elems != 0) {
is.setstate(std::ios::failbit);
return is;
}
row_splits.push_back(std::vector<int32_t>(1, 0));
}
} else if (c == static_cast<int32_t>(']')) {
cur_level--;
if (cur_level <= 0) { // Done; return...
if (cur_level < 0) { // ']' without '['.
is.setstate(std::ios::failbit);
return is;
}
row_splits.erase(row_splits.begin());
if (row_splits.empty()) {
// Assume 2 axes even though the num-axes is ambiguous from the input.
// row_splits is 0 0.
row_splits.push_back(std::vector<int32_t>(1, 0));
}
std::vector<RaggedShapeLayer> axes(row_splits.size());
for (size_t i = 0; i < row_splits.size(); i++) {
axes[i].row_splits = Array1<int32_t>(GetCpuContext(), row_splits[i]);
axes[i].cached_tot_size = -1;
}
shape = RaggedShape(axes);
return is;
}
row_splits[cur_level].push_back(
(cur_level + 1 >= row_splits.size())
? num_elems
: (row_splits[cur_level + 1].size() - 1));
} else if (c == static_cast<int32_t>('x')) {
if (cur_level != static_cast<int32_t>(row_splits.size()) ||
cur_level < 2) {
is.setstate(std::ios::failbit);
return is;
}
num_elems++;
} else {
is.setstate(std::ios::failbit);
return is;
}
}
}
} // namespace k2
|
aaf336ab245d64ed54f83e22198951dd380614bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH.h>
#include <THHGeneral.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define VEC_0(ARRAY) ((ARRAY).x)
#define VEC_1(ARRAY) ((ARRAY).y)
#define VEC_2(ARRAY) ((ARRAY).z)
#define VEC_3(ARRAY) ((ARRAY).w)
#define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))])
#define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))])
#define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))])
#define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
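// Forward kernel: for every (batch, y, x) position the output is the L2 norm
// of input1 over the channel dimension (norm_deg is accepted but unused; the
// 2-norm is always computed).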
__global__ void kernel_ChannelNorm_updateOutput(
const int n,
const float* input1, const long4 input1_size, const long4 input1_stride,
float* output, const long4 output_size, const long4 output_stride,
int norm_deg
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = VEC_0(output_size);
int dim_c = VEC_1(output_size);
int dim_h = VEC_2(output_size);
int dim_w = VEC_3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int i1dim_c = VEC_1(input1_size);
int i1dim_h = VEC_2(input1_size);
int i1dim_w = VEC_3(input1_size);
int i1dim_chw = i1dim_c * i1dim_h * i1dim_w;
int i1dim_hw = i1dim_h * i1dim_w;
float result = 0.0;
for (int c = 0; c < i1dim_c; ++c) {
int i1Index = b * i1dim_chw + c * i1dim_hw + y * i1dim_w + x;
float val = input1[i1Index];
result += val * val;
}
result = sqrt(result);
output[index] = result;
}
__global__ void kernel_ChannelNorm_backward_input1(
const int n,
const float* input1, const long4 input1_size, const long4 input1_stride,
const float* output, const long4 output_size, const long4 output_stride,
const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
float* gradInput, const long4 gradInput_size, const long4 gradInput_stride,
int norm_deg
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float val = 0.0;
int dim_b = VEC_0(gradInput_size);
int dim_c = VEC_1(gradInput_size);
int dim_h = VEC_2(gradInput_size);
int dim_w = VEC_3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int outIndex = b * dim_hw + y * dim_w + x;
val = gradOutput[outIndex] * input1[index] / (output[outIndex]+1e-9);
gradInput[index] = val;
}
void ChannelNorm_kernel_forward(
THCState* state,
THCudaTensor* input1,
THCudaTensor* output,
int norm_deg
) {
int n = 0;
n = THCudaTensor_nElement(state, output);
hipLaunchKernelGGL(( kernel_ChannelNorm_updateOutput), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n,
THCudaTensor_data(state, input1), make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]), make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]),
THCudaTensor_data(state, output), make_long4(output->size[0], output->size[1], output->size[2], output->size[3]), make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]),
norm_deg
);
THCudaCheck(hipGetLastError());
}
void ChannelNorm_kernel_backward(
THCState* state,
THCudaTensor* input1,
THCudaTensor* output,
THCudaTensor* gradOutput,
THCudaTensor* gradInput1,
int norm_deg
) {
int n = 0;
n = THCudaTensor_nElement(state, gradInput1);
hipLaunchKernelGGL(( kernel_ChannelNorm_backward_input1), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n,
THCudaTensor_data(state, input1), make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]), make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]),
THCudaTensor_data(state, output), make_long4(output->size[0], output->size[1], output->size[2], output->size[3]), make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]),
THCudaTensor_data(state, gradOutput), make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]), make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]),
THCudaTensor_data(state, gradInput1), make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]), make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]),
norm_deg
);
THCudaCheck(hipGetLastError());
}
#ifdef __cplusplus
}
#endif | aaf336ab245d64ed54f83e22198951dd380614bd.cu | #include <THC.h>
#include <THCGeneral.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define VEC_0(ARRAY) ((ARRAY).x)
#define VEC_1(ARRAY) ((ARRAY).y)
#define VEC_2(ARRAY) ((ARRAY).z)
#define VEC_3(ARRAY) ((ARRAY).w)
#define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))])
#define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))])
#define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))])
#define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
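// Forward kernel: for every (batch, y, x) position the output is the L2 norm
// of input1 over the channel dimension (norm_deg is accepted but unused; the
// 2-norm is always computed).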
__global__ void kernel_ChannelNorm_updateOutput(
const int n,
const float* input1, const long4 input1_size, const long4 input1_stride,
float* output, const long4 output_size, const long4 output_stride,
int norm_deg
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = VEC_0(output_size);
int dim_c = VEC_1(output_size);
int dim_h = VEC_2(output_size);
int dim_w = VEC_3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int i1dim_c = VEC_1(input1_size);
int i1dim_h = VEC_2(input1_size);
int i1dim_w = VEC_3(input1_size);
int i1dim_chw = i1dim_c * i1dim_h * i1dim_w;
int i1dim_hw = i1dim_h * i1dim_w;
float result = 0.0;
for (int c = 0; c < i1dim_c; ++c) {
int i1Index = b * i1dim_chw + c * i1dim_hw + y * i1dim_w + x;
float val = input1[i1Index];
result += val * val;
}
result = sqrt(result);
output[index] = result;
}
__global__ void kernel_ChannelNorm_backward_input1(
const int n,
const float* input1, const long4 input1_size, const long4 input1_stride,
const float* output, const long4 output_size, const long4 output_stride,
const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
float* gradInput, const long4 gradInput_size, const long4 gradInput_stride,
int norm_deg
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float val = 0.0;
int dim_b = VEC_0(gradInput_size);
int dim_c = VEC_1(gradInput_size);
int dim_h = VEC_2(gradInput_size);
int dim_w = VEC_3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int outIndex = b * dim_hw + y * dim_w + x;
val = gradOutput[outIndex] * input1[index] / (output[outIndex]+1e-9);
gradInput[index] = val;
}
void ChannelNorm_kernel_forward(
THCState* state,
THCudaTensor* input1,
THCudaTensor* output,
int norm_deg
) {
int n = 0;
n = THCudaTensor_nElement(state, output);
kernel_ChannelNorm_updateOutput<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n,
THCudaTensor_data(state, input1), make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]), make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]),
THCudaTensor_data(state, output), make_long4(output->size[0], output->size[1], output->size[2], output->size[3]), make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]),
norm_deg
);
THCudaCheck(cudaGetLastError());
}
void ChannelNorm_kernel_backward(
THCState* state,
THCudaTensor* input1,
THCudaTensor* output,
THCudaTensor* gradOutput,
THCudaTensor* gradInput1,
int norm_deg
) {
int n = 0;
n = THCudaTensor_nElement(state, gradInput1);
kernel_ChannelNorm_backward_input1<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n,
THCudaTensor_data(state, input1), make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]), make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]),
THCudaTensor_data(state, output), make_long4(output->size[0], output->size[1], output->size[2], output->size[3]), make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]),
THCudaTensor_data(state, gradOutput), make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]), make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]),
THCudaTensor_data(state, gradInput1), make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]), make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]),
norm_deg
);
THCudaCheck(cudaGetLastError());
}
#ifdef __cplusplus
}
#endif |
f5d8e4b2a3ec4a99b322567c0fa86a960fc6eff5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include"hip/device_functions.h"
#include <stdio.h>
#include<iostream>
#include <fstream>
#include <string>
#include "math.h"
#include<time.h>
//skiplist
#include <time.h>
#include <malloc.h>
#include <unordered_map>
using namespace std;
#define NEI_MAX 300 // maximum allowed size of the similarity matrix
#define PEOPLE_CAP 2500 // total number of users
#define MOVIE_CAP 2500 // total number of movies
#define TEST_DATA_CAP 30000 // maximum number of test records that can be analysed
#define THREDS_NUM 1024 // maximum number of threads per block
double approximate(double a)
{
if (a < 1)
return 1;
if (a > 5)
return 5;
//if (a < 2.5)
// return 1;
int b = (int)a;
if (a - (double)b > 0.6)
return (double)b + 1;
if (a - (double)b > 0.3)
return (double)b + 0.5;
return (double)b;
}
// compute similarity
__global__ void getSim(const int testData[], const double rating_map[],
double sim[], const int USER_NUM, const int MOVIE_NUM, const int offeset)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int movie1Id = bid + 1; // movie handled by the current block
int movie2Id = ((tid > MOVIE_NUM >> 1) ? MOVIE_NUM - tid : tid) + 1;
// movie index for this thread; a folded (modulo-style) mapping is used so each pair is not computed twice
double movie1Sum = 0;
double movie2Sum = 0;
int kernelNum = 0;
// vectors used to store the ratings of the two movies
double movie1[PEOPLE_CAP];
double movie2[PEOPLE_CAP];
// collect users who rated both movies
for (int i = 1; i <= USER_NUM; i++)
if (rating_map[movie1Id * PEOPLE_CAP + i] && rating_map[movie2Id * PEOPLE_CAP + i])
{
movie1Sum += rating_map[movie1Id * PEOPLE_CAP + i];
movie2Sum += rating_map[movie2Id * PEOPLE_CAP + i];
movie1[kernelNum] = rating_map[movie1Id * PEOPLE_CAP + i];
movie2[kernelNum] = rating_map[movie2Id * PEOPLE_CAP + i];
kernelNum++;
}
// if the vectors are empty (originally: fewer than 100 common raters), the similarity is treated as not computable and is not used for prediction
if (kernelNum>0)
{
// compute the correlation (Pearson) similarity
double bar1 = movie1Sum / kernelNum;
double bar2 = movie2Sum / kernelNum;
double temp1 = 0;
double temp2 = 0;
double temp3 = 0;
for (int i = 0; i < kernelNum; i++)
{
temp1 += (movie1[i] - bar1)*(movie2[i] - bar2);
temp2 += (movie1[i] - bar1)*(movie1[i] - bar1);
temp3 += (movie2[i] - bar2)*(movie2[i] - bar2);
}
// if a denominator is 0, treat the similarity as nonexistent
if( temp2 && temp3)
{
double result = temp1 / sqrt(temp2 * temp3);
sim[movie1Id * MOVIE_NUM + movie2Id] = result;
sim[movie2Id * MOVIE_NUM + movie1Id] = result;
}
else
{
sim[movie1Id * MOVIE_NUM + movie2Id] = -10; // similarity does not exist
sim[movie2Id * MOVIE_NUM + movie1Id] = -10;
}
}
else
{
sim[movie1Id * MOVIE_NUM + movie2Id] = -10;
sim[movie2Id * MOVIE_NUM + movie1Id] = -10;
}
}
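// Prediction kernel: for each test (user, movie) pair a least-squares line
// rating = a * similarity + b is fitted over all movies the user has rated
// (x = similarity to the target movie, y = the user's rating); the prediction
// is a + b, i.e. the fitted rating at similarity 1.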
__global__ void conclude(double result[], const double rating_map[], double sim[],
const int TEST_NUM, const int MOVIE_NUM, const int testData[])
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int index = bid * blockDim.x + tid; // test case handled by this thread
if (index < TEST_NUM)
{
int testMovieId = testData[index] & 0xFFFF; // id of the movie to predict
int userId = testData[index] >> 16;
// linear regression
double a = 0;
double b = 0;
double t1, t2, t3, t4;
double result1 = 0;
t1 = t2 = t3 = t4 = 0;
int length = 0;
double rate;
double similarity;
for (int i = 0; i < MOVIE_NUM; i++)
if ( (rate = rating_map[i * PEOPLE_CAP + userId])
&& (similarity = sim[testMovieId * MOVIE_NUM + i]) && similarity >= -1)
{
t1 += similarity * similarity; //x^2
t2 += similarity;
t3 += similarity * rate; //x*y
t4 += rate;
length++;
}
double sim_bar = t2 / length;
double rate_bar = t4 / length;
a = (t3*length - t2*t4) / (t1*length - t2*t2);
b = (t1*t4 - t2*t3) / (t1*length - t2*t2);
//result[index] = (a + b) > 0 ? a + b : 1;
result[index] = a + b; // store the prediction in the result array
}
}
double *dev_rating_map = 0;
double *temp_dev_rating_map = 0;
double *dev_sim = 0;
int *dev_test_data = 0;
double *dev_result = 0;
int main()
{
// test data and ratings
int testData[TEST_DATA_CAP];
double testRating[TEST_DATA_CAP];
int startClock = clock();
hipSetDevice(0);
// data file
int ReadingClock = clock();
string fileName = "data/ua.base";
ifstream ratingFile(fileName);
if (!ratingFile.is_open())
{
std::cout << "Error opening " + fileName;
exit(1);
}
// test file
fileName = "data/ua.test";
ifstream testFile(fileName);
if (!testFile.is_open())
{
std::cout << "Error opening " + fileName;
exit(1);
}
// result file
fileName = "data/result.txt";
ofstream resultFile;
resultFile.open(fileName);
if (!resultFile.is_open())
{
std::cout << "Error opening " + fileName;
exit(1);
}
// read the test file
double rating;
int userId, movieId;
long long int timeStamp;
int TEST_NUM = 0;
while (!testFile.eof())
{
testFile >> userId >> movieId >> rating >> timeStamp;
//int temp = userId << 16 | movieId;
//cout << (temp >> 16) << " " << (temp & 0xFFFF) << endl;
testRating[TEST_NUM] = rating;
testData[TEST_NUM++] = userId << 16 | movieId; // assume id values are no larger than 65 536
}
hipMalloc((void**)&dev_test_data, TEST_NUM * sizeof(int));
hipMemcpy(dev_test_data, testData, TEST_NUM * sizeof(int), hipMemcpyHostToDevice);
testFile.close();
// read the rating (training) file into the rating map
hipMalloc((void**)&dev_rating_map, PEOPLE_CAP * MOVIE_CAP * sizeof(double)); // rating matrix on the device, indexed by movie and user
int MOVIE_NUM = 0;
int PEOPLE_NUM = 0;
while (!ratingFile.eof())
{
ratingFile >> userId >> movieId >> rating >> timeStamp;
if (PEOPLE_NUM < userId)
PEOPLE_NUM = userId;
if (movieId > MOVIE_NUM)
MOVIE_NUM = movieId + 1;
hipMemcpy(dev_rating_map + userId + movieId * PEOPLE_CAP, &rating, sizeof(double), hipMemcpyHostToDevice);
}
ratingFile.close();
//double temp[20000];
//for (int i = 0; i < MOVIE_CAP; i++)
//{
// cout << i << endl;
// hipMemcpy(temp, dev_rating_map + PEOPLE_CAP * i, PEOPLE_CAP * sizeof(double), hipMemcpyDeviceToHost);
// for (int j = 0; j < PEOPLE_CAP; j++)
// if (temp[j] != 0)
// cout << temp[j] << " ";
// cout << endl;
//}
cout << "There are " << MOVIE_NUM << " movies amd " << PEOPLE_NUM << " peoples" << endl;
cout << TEST_NUM << " data need to be predicted" << endl;
std::cout << "ReadFile use " << clock() - ReadingClock << "ms" << endl;
hipMalloc((void**)&dev_sim, MOVIE_NUM * MOVIE_NUM * sizeof(double)); // sim matrix
int threadNum = MOVIE_NUM > 1024 ? 1024 : MOVIE_NUM;
hipLaunchKernelGGL(( getSim) , dim3(MOVIE_NUM), dim3(MOVIE_NUM /2), 0, 0, dev_test_data, dev_rating_map, dev_sim, PEOPLE_NUM, MOVIE_NUM, 0);
hipDeviceSynchronize();
hipMalloc((void**)&dev_result, TEST_NUM * sizeof(double)); // prediction result array
conclude << < 20, 1000 >> > (dev_result, dev_rating_map, dev_sim, TEST_NUM, MOVIE_NUM, dev_test_data);
double *result = new double[TEST_NUM];
hipMemcpy(result, dev_result, TEST_NUM * sizeof(double), hipMemcpyDeviceToHost);
double d = 0;
int realNum = 0;
for (int i = 0; i < TEST_NUM; i++)
{
double predict = approximate(result[i]);
//cout << (testData[i] >> 16) << " " << (testData[i] & 0xFFFF) << " " << testRating[i] << " " << predict << endl;
if ( predict != 1)
{
//cout << (testData[i] >> 16) << " " << (testData[i] & 0xFFFF) << " " << testRating[i] << " " << predict << endl;
double c = fabs(testRating[i] - predict);
if (c < 5) {
resultFile << (testData[i] >> 16) << " " << (testData[i] & 0xFFFF) << " " << testRating[i] << " " << predict << endl;
d += c;
}
realNum++;
}
}
cout << "MAE" << d / realNum << endl;
cout << ":" << (double)realNum / TEST_NUM << endl;
hipFree(dev_rating_map);
hipFree(dev_test_data);
hipFree(dev_result);
hipFree(dev_sim);
std::cout << "total use " << clock() - startClock << "ms" << endl;
return 0;
}
| f5d8e4b2a3ec4a99b322567c0fa86a960fc6eff5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include"device_functions.h"
#include <stdio.h>
#include<iostream>
#include <fstream>
#include <string>
#include "math.h"
#include<time.h>
//skiplist
#include <time.h>
#include <malloc.h>
#include <unordered_map>
using namespace std;
#define NEI_MAX 300 // maximum allowed size of the similarity matrix
#define PEOPLE_CAP 2500 // total number of users
#define MOVIE_CAP 2500 // total number of movies
#define TEST_DATA_CAP 30000 // maximum number of test records that can be analysed
#define THREDS_NUM 1024 // maximum number of threads per block
double approximate(double a)
{
if (a < 1)
return 1;
if (a > 5)
return 5;
//if (a < 2.5)
// return 1;
int b = (int)a;
if (a - (double)b > 0.6)
return (double)b + 1;
if (a - (double)b > 0.3)
return (double)b + 0.5;
return (double)b;
}
// compute similarity
__global__ void getSim(const int testData[], const double rating_map[],
double sim[], const int USER_NUM, const int MOVIE_NUM, const int offeset)
{
int bid = blockIdx.x;
int tid = threadIdx.x;
int movie1Id = bid + 1; //获得当前BLock的
int movie2Id = ((tid > MOVIE_NUM >> 1) ? MOVIE_NUM - tid : tid) + 1;
	//movie index for this thread; the remainder-style mapping avoids computing each pair twice
double movie1Sum = 0;
double movie2Sum = 0;
int kernelNum = 0;
	//buffers holding the rating vectors of the two movies
double movie1[PEOPLE_CAP];
double movie2[PEOPLE_CAP];
	//select the users who rated both movies and add them to the vectors
for (int i = 1; i <= USER_NUM; i++)
if (rating_map[movie1Id * PEOPLE_CAP + i] && rating_map[movie2Id * PEOPLE_CAP + i])
{
movie1Sum += rating_map[movie1Id * PEOPLE_CAP + i];
movie2Sum += rating_map[movie2Id * PEOPLE_CAP + i];
movie1[kernelNum] = rating_map[movie1Id * PEOPLE_CAP + i];
movie2[kernelNum] = rating_map[movie2Id * PEOPLE_CAP + i];
kernelNum++;
}
	//if the vectors share too few raters, treat the similarity as not computable and skip it in later prediction
if (kernelNum>0)
{
		//compute the correlation similarity
double bar1 = movie1Sum / kernelNum;
double bar2 = movie2Sum / kernelNum;
double temp1 = 0;
double temp2 = 0;
double temp3 = 0;
for (int i = 0; i < kernelNum; i++)
{
temp1 += (movie1[i] - bar1)*(movie2[i] - bar2);
temp2 += (movie1[i] - bar1)*(movie1[i] - bar1);
temp3 += (movie2[i] - bar2)*(movie2[i] - bar2);
}
		//if the denominator is 0, treat the similarity as nonexistent
if( temp2 && temp3)
{
double result = temp1 / sqrt(temp2 * temp3);
sim[movie1Id * MOVIE_NUM + movie2Id] = result;
sim[movie2Id * MOVIE_NUM + movie1Id] = result;
}
else
{
			sim[movie1Id * MOVIE_NUM + movie2Id] = -10; //similarity does not exist
sim[movie2Id * MOVIE_NUM + movie1Id] = -10;
}
}
else
{
sim[movie1Id * MOVIE_NUM + movie2Id] = -10;
sim[movie2Id * MOVIE_NUM + movie1Id] = -10;
}
}
__global__ void conclude(double result[], const double rating_map[], double sim[],
const int TEST_NUM, const int MOVIE_NUM, const int testData[])
{
int tid = threadIdx.x;
int bid = blockIdx.x;
	int index = bid * blockDim.x + tid; //test case this thread computes
	if (index < TEST_NUM)
	{
		int testMovieId = testData[index] & 0xFFFF; //id of the movie to predict
		int userId = testData[index] >> 16;
		//linear regression
double a = 0;
double b = 0;
double t1, t2, t3, t4;
double result1 = 0;
t1 = t2 = t3 = t4 = 0;
int length = 0;
double rate;
double similarity;
for (int i = 0; i < MOVIE_NUM; i++)
if ( (rate = rating_map[i * PEOPLE_CAP + userId])
&& (similarity = sim[testMovieId * MOVIE_NUM + i]) && similarity >= -1)
{
t1 += similarity * similarity; //x^2
t2 += similarity;
t3 += similarity * rate; //x*y
t4 += rate;
length++;
}
double sim_bar = t2 / length;
double rate_bar = t4 / length;
a = (t3*length - t2*t4) / (t1*length - t2*t2);
b = (t1*t4 - t2*t3) / (t1*length - t2*t2);
//result[index] = (a + b) > 0 ? a + b : 1;
		result[index] = a + b; //store the prediction in the result array
}
}
double *dev_rating_map = 0;
double *temp_dev_rating_map = 0;
double *dev_sim = 0;
int *dev_test_data = 0;
double *dev_result = 0;
int main()
{
	//test records and their ratings
int testData[TEST_DATA_CAP];
double testRating[TEST_DATA_CAP];
int startClock = clock();
cudaSetDevice(0);
	//rating data file
int ReadingClock = clock();
string fileName = "data/ua.base";
ifstream ratingFile(fileName);
if (!ratingFile.is_open())
{
std::cout << "Error opening " + fileName;
exit(1);
}
	//test file
fileName = "data/ua.test";
ifstream testFile(fileName);
if (!testFile.is_open())
{
std::cout << "Error opening " + fileName;
exit(1);
}
	//result file
fileName = "data/result.txt";
ofstream resultFile;
resultFile.open(fileName);
if (!resultFile.is_open())
{
std::cout << "Error opening " + fileName;
exit(1);
}
	//read the test file
double rating;
int userId, movieId;
long long int timeStamp;
int TEST_NUM = 0;
while (!testFile.eof())
{
testFile >> userId >> movieId >> rating >> timeStamp;
//int temp = userId << 16 | movieId;
//cout << (temp >> 16) << " " << (temp & 0xFFFF) << endl;
testRating[TEST_NUM] = rating;
		testData[TEST_NUM++] = userId << 16 | movieId; //assume ids never exceed 65,536
}
cudaMalloc((void**)&dev_test_data, TEST_NUM * sizeof(int));
cudaMemcpy(dev_test_data, testData, TEST_NUM * sizeof(int), cudaMemcpyHostToDevice);
testFile.close();
	//read the rating data
	cudaMalloc((void**)&dev_rating_map, PEOPLE_CAP * MOVIE_CAP * sizeof(double)); //rows are movies, columns are users
int MOVIE_NUM = 0;
int PEOPLE_NUM = 0;
while (!ratingFile.eof())
{
ratingFile >> userId >> movieId >> rating >> timeStamp;
if (PEOPLE_NUM < userId)
PEOPLE_NUM = userId;
if (movieId > MOVIE_NUM)
MOVIE_NUM = movieId + 1;
cudaMemcpy(dev_rating_map + userId + movieId * PEOPLE_CAP, &rating, sizeof(double), cudaMemcpyHostToDevice);
}
ratingFile.close();
//double temp[20000];
//for (int i = 0; i < MOVIE_CAP; i++)
//{
// cout << i << endl;
// cudaMemcpy(temp, dev_rating_map + PEOPLE_CAP * i, PEOPLE_CAP * sizeof(double), cudaMemcpyDeviceToHost);
// for (int j = 0; j < PEOPLE_CAP; j++)
// if (temp[j] != 0)
// cout << temp[j] << " ";
// cout << endl;
//}
	cout << "There are " << MOVIE_NUM << " movies and " << PEOPLE_NUM << " people" << endl;
cout << TEST_NUM << " data need to be predicted" << endl;
std::cout << "ReadFile use " << clock() - ReadingClock << "ms" << endl;
	cudaMalloc((void**)&dev_sim, MOVIE_NUM * MOVIE_NUM * sizeof(double)); //similarity matrix
int threadNum = MOVIE_NUM > 1024 ? 1024 : MOVIE_NUM;
getSim <<< MOVIE_NUM, MOVIE_NUM /2>>> (dev_test_data, dev_rating_map, dev_sim, PEOPLE_NUM, MOVIE_NUM, 0);
cudaThreadSynchronize();
	cudaMalloc((void**)&dev_result, TEST_NUM * sizeof(double)); //prediction results
conclude << < 20, 1000 >> > (dev_result, dev_rating_map, dev_sim, TEST_NUM, MOVIE_NUM, dev_test_data);
double *result = new double[TEST_NUM];
cudaMemcpy(result, dev_result, TEST_NUM * sizeof(double), cudaMemcpyDeviceToHost);
double d = 0;
int realNum = 0;
for (int i = 0; i < TEST_NUM; i++)
{
double predict = approximate(result[i]);
//cout << (testData[i] >> 16) << " " << (testData[i] & 0xFFFF) << " " << testRating[i] << " " << predict << endl;
if ( predict != 1)
{
//cout << (testData[i] >> 16) << " " << (testData[i] & 0xFFFF) << " " << testRating[i] << " " << predict << endl;
double c = fabs(testRating[i] - predict);
if (c < 5) {
resultFile << (testData[i] >> 16) << " " << (testData[i] & 0xFFFF) << " " << testRating[i] << " " << predict << endl;
d += c;
}
realNum++;
}
}
	cout << "MAE: " << d / realNum << endl;
	cout << "valid rate: " << (double)realNum / TEST_NUM << endl;
cudaFree(dev_rating_map);
cudaFree(dev_test_data);
cudaFree(dev_result);
cudaFree(dev_sim);
std::cout << "total use " << clock() - startClock << "ms" << endl;
return 0;
}
|
0233654dafe4ca98d19090a8b19064795c6b9a33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
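// Cuckoo-hash insert kernel: each group of 16 threads (ELEM_NUM) cooperates on one
// element, using warp ballots to find a matching or empty slot in a bucket; every
// store is persisted with CLWB plus a memory fence, suggesting an NVM-resident table.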
__global__ void hash_insert_cuckoo_nvmo(
bucket_t *hash_table,
ielem_t **blk_input,
int *blk_elem_num)
{
ielem_t *in = blk_input[blockIdx.x];
int total_elem_num = blk_elem_num[blockIdx.x];
// 16 threads to cooperate for one element
int step = blockDim.x >> ELEM_NUM_P;
int idx = threadIdx.x;
hash_t hash, second_hash;
loc_t loc, new_loc;
sign_t sig, new_sig;
int id;
int cuckoo_num;
bucket_t *b;
int chosen_simd;
int ballot, ml_mask;
int simd_lane = idx & ((1 << ELEM_NUM_P) - 1);
int elem_id = idx >> ELEM_NUM_P;
int bit_move = idx & (((1 << (5 - ELEM_NUM_P)) - 1) << ELEM_NUM_P);
for (id = elem_id; id < total_elem_num; id += step) {
ielem_t *elem = &(in[id]);
if (elem->sig == 0 && elem->loc == 0) {
printf("error, all is zero\n");
continue;
}
sig = elem->sig;
hash = elem->hash;
loc = elem->loc;
b = &(hash_table[hash & HASH_MASK]);
/*=====================================================================
* The double __syncthreads() seems useless in else, this is to match the two in
* if (chosen_simd == simd_lane). As is stated in the paper <Demystifying GPU
* Microarchitecture through Microbenchmarking>, the __syncthreads() will not go
		 * wrong if not all threads in one warp reach it. However, the warps in the same
* block need to reach a __syncthreads(), even if they are not on the same line */
/* Check for same signatures in two bucket */
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == sig);
/* first half warp(0~15 threads), bit_move = 0
* for second half warp(16~31 threads), bit_move = 16 */
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
if (ballot != 0) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
continue;
}
/*=====================================================================*/
		/* Next we try to insert; the while loop breaks if ballot == 0, and the
		 * __syncthreads() in the two loops match if the code paths diverge between
		 * the warps in a block. Either some will terminate, or they process the next element.
		 * FIXME: if some warps go on to process the next element while some stay here, will this
		 * lead to a mismatch in __syncthreads()? If it does, we should launch one thread
		 * for each element. God knows how the nVidia GPU will behave. FIXME;
		 * Here we write b->loc, and the above code also writes b->loc. This will not
		 * lead to conflicts, because here all the signatures are 0, while the ones above
		 * are all non-zero */
/* Major Location : use last 4 bits of signature */
ml_mask = (1 << (sig & ((1 << ELEM_NUM_P) - 1))) - 1;
/* find the empty slot for insertion */
while (1) {
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == 0);
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
/* 1010|0011 => 0000 0011 1010 0000, 16 bits to 32 bits*/
ballot = ((ballot & ml_mask) << 16) | ((ballot & ~(ml_mask)));
if (ballot != 0) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
if (simd_lane == chosen_simd) {
b->sig[simd_lane] = sig;
CLWB(&(b->sig[simd_lane]));
asm("membar.gl;");
}
}
__syncthreads();
if (ballot != 0) {
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
goto finish;
}
} else {
break;
}
}
/* ==== try next bucket ==== */
cuckoo_num = 0;
cuckoo_evict:
second_hash = (((hash ^ sig) & BLOCK_HASH_MASK)
| (hash & ~BLOCK_HASH_MASK)) & HASH_MASK;
b = &(hash_table[second_hash]);
/*=====================================================================*/
/* Check for same signatures in two bucket */
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == sig);
/* first half warp(0~15 threads), bit_move = 0
* for second half warp(16~31 threads), bit_move = 16 */
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
if (0 != ballot) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
continue;
}
while (1) {
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == 0);
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
ballot = ((ballot & ml_mask) << 16) | ((ballot & ~(ml_mask)));
if (ballot != 0) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
} else {
/* No available slot.
* Get a Major location between 0 and 15 for insertion */
chosen_simd = elem->sig & ((1 << ELEM_NUM_P) - 1);
if (cuckoo_num < MAX_CUCKOO_NUM) {
/* record the signature to be evicted */
new_sig = b->sig[chosen_simd];
new_loc = b->loc[chosen_simd];
}
}
/* synchronize before the signature is written by others */
__syncthreads();
if (ballot != 0) {
if (simd_lane == chosen_simd) {
b->sig[simd_lane] = sig;
CLWB(&(b->sig[simd_lane]));
asm("membar.gl;");
}
} else {
/* two situations to handle: 1) cuckoo_num < MAX_CUCKOO_NUM,
* replace one element, and reinsert it into its alternative bucket.
* 2) cuckoo_num >= MAX_CUCKOO_NUM.
				 * The cuckoo eviction has exceeded the maximum number of insert attempts; replace the element.
* In each case, we write the signature first.*/
if (simd_lane == chosen_simd) {
b->sig[simd_lane] = sig;
CLWB(&(b->sig[simd_lane]));
asm("membar.gl;");
}
}
__syncthreads();
if (ballot != 0) {
/* write the empty slot or try again when conflict */
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
goto finish;
}
} else {
if (cuckoo_num < MAX_CUCKOO_NUM) {
cuckoo_num ++;
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
sig = new_sig;
loc = new_loc;
goto cuckoo_evict;
} else {
/* if there is conflict when writing the signature,
* it has been replaced by another one. Reinserting
* the element is meaningless, because it will evict
* the one that is just inserted. Only one will survive,
* we just give up the failed one */
goto finish;
}
} else {
				/* exceeded the maximum number of insert attempts, evict one */
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
}
				/* whether or not successfully inserted, finish */
goto finish;
}
}
}
finish:
;
//now we get to the next element
}
return;
}
| 0233654dafe4ca98d19090a8b19064795c6b9a33.cu | __global__ void hash_insert_cuckoo_nvmo(
bucket_t *hash_table,
ielem_t **blk_input,
int *blk_elem_num)
{
ielem_t *in = blk_input[blockIdx.x];
int total_elem_num = blk_elem_num[blockIdx.x];
// 16 threads to cooperate for one element
int step = blockDim.x >> ELEM_NUM_P;
int idx = threadIdx.x;
hash_t hash, second_hash;
loc_t loc, new_loc;
sign_t sig, new_sig;
int id;
int cuckoo_num;
bucket_t *b;
int chosen_simd;
int ballot, ml_mask;
int simd_lane = idx & ((1 << ELEM_NUM_P) - 1);
int elem_id = idx >> ELEM_NUM_P;
int bit_move = idx & (((1 << (5 - ELEM_NUM_P)) - 1) << ELEM_NUM_P);
for (id = elem_id; id < total_elem_num; id += step) {
ielem_t *elem = &(in[id]);
if (elem->sig == 0 && elem->loc == 0) {
printf("error, all is zero\n");
continue;
}
sig = elem->sig;
hash = elem->hash;
loc = elem->loc;
b = &(hash_table[hash & HASH_MASK]);
/*=====================================================================
* The double __syncthreads() seems useless in else, this is to match the two in
* if (chosen_simd == simd_lane). As is stated in the paper <Demystifying GPU
* Microarchitecture through Microbenchmarking>, the __syncthreads() will not go
		 * wrong if not all threads in one warp reach it. However, the warps in the same
* block need to reach a __syncthreads(), even if they are not on the same line */
/* Check for same signatures in two bucket */
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == sig);
/* first half warp(0~15 threads), bit_move = 0
* for second half warp(16~31 threads), bit_move = 16 */
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
if (ballot != 0) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
continue;
}
/*=====================================================================*/
		/* Next we try to insert; the while loop breaks if ballot == 0, and the
		 * __syncthreads() in the two loops match if the code paths diverge between
		 * the warps in a block. Either some will terminate, or they process the next element.
		 * FIXME: if some warps go on to process the next element while some stay here, will this
		 * lead to a mismatch in __syncthreads()? If it does, we should launch one thread
		 * for each element. God knows how the nVidia GPU will behave. FIXME;
		 * Here we write b->loc, and the above code also writes b->loc. This will not
		 * lead to conflicts, because here all the signatures are 0, while the ones above
		 * are all non-zero */
/* Major Location : use last 4 bits of signature */
ml_mask = (1 << (sig & ((1 << ELEM_NUM_P) - 1))) - 1;
/* find the empty slot for insertion */
while (1) {
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == 0);
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
/* 1010|0011 => 0000 0011 1010 0000, 16 bits to 32 bits*/
ballot = ((ballot & ml_mask) << 16) | ((ballot & ~(ml_mask)));
if (ballot != 0) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
if (simd_lane == chosen_simd) {
b->sig[simd_lane] = sig;
CLWB(&(b->sig[simd_lane]));
asm("membar.gl;");
}
}
__syncthreads();
if (ballot != 0) {
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
goto finish;
}
} else {
break;
}
}
/* ==== try next bucket ==== */
cuckoo_num = 0;
cuckoo_evict:
second_hash = (((hash ^ sig) & BLOCK_HASH_MASK)
| (hash & ~BLOCK_HASH_MASK)) & HASH_MASK;
b = &(hash_table[second_hash]);
/*=====================================================================*/
/* Check for same signatures in two bucket */
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == sig);
/* first half warp(0~15 threads), bit_move = 0
* for second half warp(16~31 threads), bit_move = 16 */
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
if (0 != ballot) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
continue;
}
while (1) {
ballot = __ballot_sync(FULL_MASK,b->sig[simd_lane] == 0);
ballot = (ballot >> bit_move) & ((1 << ELEM_NUM) - 1);
ballot = ((ballot & ml_mask) << 16) | ((ballot & ~(ml_mask)));
if (ballot != 0) {
chosen_simd = (__ffs(ballot) - 1) & ((1 << ELEM_NUM_P) - 1);
} else {
/* No available slot.
* Get a Major location between 0 and 15 for insertion */
chosen_simd = elem->sig & ((1 << ELEM_NUM_P) - 1);
if (cuckoo_num < MAX_CUCKOO_NUM) {
/* record the signature to be evicted */
new_sig = b->sig[chosen_simd];
new_loc = b->loc[chosen_simd];
}
}
/* synchronize before the signature is written by others */
__syncthreads();
if (ballot != 0) {
if (simd_lane == chosen_simd) {
b->sig[simd_lane] = sig;
CLWB(&(b->sig[simd_lane]));
asm("membar.gl;");
}
} else {
/* two situations to handle: 1) cuckoo_num < MAX_CUCKOO_NUM,
* replace one element, and reinsert it into its alternative bucket.
* 2) cuckoo_num >= MAX_CUCKOO_NUM.
				 * The cuckoo eviction has exceeded the maximum number of insert attempts; replace the element.
* In each case, we write the signature first.*/
if (simd_lane == chosen_simd) {
b->sig[simd_lane] = sig;
CLWB(&(b->sig[simd_lane]));
asm("membar.gl;");
}
}
__syncthreads();
if (ballot != 0) {
/* write the empty slot or try again when conflict */
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
goto finish;
}
} else {
if (cuckoo_num < MAX_CUCKOO_NUM) {
cuckoo_num ++;
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
sig = new_sig;
loc = new_loc;
goto cuckoo_evict;
} else {
/* if there is conflict when writing the signature,
* it has been replaced by another one. Reinserting
* the element is meaningless, because it will evict
* the one that is just inserted. Only one will survive,
* we just give up the failed one */
goto finish;
}
} else {
				/* exceeded the maximum number of insert attempts, evict one */
if (b->sig[chosen_simd] == sig) {
if (simd_lane == chosen_simd) {
b->loc[simd_lane] = loc;
CLWB(&(b->loc[simd_lane]));
asm("membar.gl;");
}
}
				/* whether or not successfully inserted, finish */
goto finish;
}
}
}
finish:
;
//now we get to the next element
}
return;
}
|
5472b05e8d83037bcf338242245c0734eb3a2afa.hip | // !!! This is a file automatically generated by hipify!!!
// This is here so Netbeans doesn't error-spam my IDE
#if !defined(__HIPCC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#endif
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <hip/hip_runtime_api.h>
#include "../../inc/GRT_Common/GRTCommon.h"
// These will be the same for all GPUs working on a hash.
__device__ __constant__ unsigned char MD5_Candidate_Device_Charset_Constant[512]; // Constant space for charset
__device__ __constant__ uint32_t MD5_Candidate_Device_Charset_Length; // Character set length
__device__ __constant__ uint32_t MD5_Candidate_Device_Chain_Length; // May as well pull it from constant memory... faster when cached.
__device__ __constant__ uint32_t MD5_Candidate_Device_Table_Index;
__device__ __constant__ uint32_t MD5_Candidate_Device_Number_Of_Threads; // It needs this, and can't easily calculate it
// 4 32-bit words for MD5 hashes
__device__ __constant__ uint32_t MD5_Candidate_Device_Hash[4];
#include "../../inc/CUDA_Common/CUDA_MD5.h"
#include "../../inc/CUDA_Common/Hash_Common.h"
#include "../../inc/GRT_CUDA_device/CUDA_Reduction_Functions.h"
// Copy the shared variables to the host
extern "C" void copyMD5CandidateDataToConstant(char *hostCharset, uint32_t hostCharsetLength,
uint32_t hostChainLength, uint32_t hostTableIndex, uint32_t hostNumberOfThreads) {
CUDA_SAFE_CALL(hipMemcpyToSymbol("MD5_Candidate_Device_Charset_Constant", hostCharset, 512));
CUDA_SAFE_CALL(hipMemcpyToSymbol("MD5_Candidate_Device_Charset_Length", &hostCharsetLength, sizeof(uint32_t)));
CUDA_SAFE_CALL(hipMemcpyToSymbol("MD5_Candidate_Device_Chain_Length", &hostChainLength, sizeof(uint32_t)));
CUDA_SAFE_CALL(hipMemcpyToSymbol("MD5_Candidate_Device_Table_Index", &hostTableIndex, sizeof(uint32_t)));
CUDA_SAFE_CALL(hipMemcpyToSymbol("MD5_Candidate_Device_Number_Of_Threads", &hostNumberOfThreads, sizeof(uint32_t)));
}
extern "C" void copyMD5HashDataToConstant(unsigned char *hash) {
// Yes, I'm copying into a uint32_t array from an unsigned char array. This works, though, and it makes
// my life easier.
// For fuck's sake, Bitweasil, copy the HASH, not the address of the hash!
CUDA_SAFE_CALL(hipMemcpyToSymbol(MD5_Candidate_Device_Hash, hash, 16 * sizeof(unsigned char)));
}
/*
__global__ void GenerateMD5CH10(unsigned char *CandidateHashes, uint32_t ThreadSpaceOffset, uint32_t StartStep, uint32_t StepsToRun) {
uint32_t b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15;
uint32_t a,b,c,d;
uint32_t *InitialArray32;
uint32_t *OutputArray32;
// 32-bit accesses to the hash arrays
InitialArray32 = (uint32_t *)MD5_Candidate_Device_Hash;
OutputArray32 = (uint32_t *)CandidateHashes;
uint32_t i, chain_index, step_to_calculate, charset_offset, last_step_for_iteration;
const int pass_length = 10;
__shared__ char charset[512];
// Generic "copy charset to shared memory" function
//copySingleCharsetToShared(charset);
copySingleCharsetToShared(charset, MD5_Candidate_Device_Charset_Constant);
// Figure out which chain we are working on.
chain_index = ((blockIdx.x*blockDim.x + threadIdx.x) + (ThreadSpaceOffset * MD5_Candidate_Device_Number_Of_Threads));
// Find out if we're done with work.
// If our index + the startstep is greater than the chain length, this thread has nothing to do.
if ((chain_index + StartStep) > MD5_Candidate_Device_Chain_Length) {
return;
}
// Load the initial hash. This will either be from the constant or from the storage space.
if (StartStep == 0) {
a = InitialArray32[0];
b = InitialArray32[1];
c = InitialArray32[2];
d = InitialArray32[3];
} else {
a = OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index];
b = OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index];
c = OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index];
d = OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index];
}
// Figure out which step we're running.
step_to_calculate = chain_index + StartStep;
// Yes, modulus here is slow. And this is a less-critical chunk of code. So it stays here.
charset_offset = step_to_calculate % MD5_Candidate_Device_Charset_Length;
clearB0toB15(b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, step_to_calculate, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index);
step_to_calculate++;
charset_offset++;
if (charset_offset >= MD5_Candidate_Device_Charset_Length) {
charset_offset = 0;
}
// Figure out the last step to run - either the chain length or
// the number of specified steps.
if ((step_to_calculate + StepsToRun) > MD5_Candidate_Device_Chain_Length) {
last_step_for_iteration = MD5_Candidate_Device_Chain_Length - 1;
} else {
last_step_for_iteration = (step_to_calculate + StepsToRun - 1); // Already run one
}
// We now have our (step+1) charset.
for (i = step_to_calculate; i <= last_step_for_iteration; i++) {
padMDHash(pass_length, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);
CUDA_MD5(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d);
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, i, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index);
charset_offset++;
if (charset_offset >= MD5_Candidate_Device_Charset_Length) {
charset_offset = 0;
}
}
// Store the hash output.
OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index] = a;
OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index] = b;
OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index] = c;
OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index] = d;
}*/
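// The macro below stamps out one kernel per supported password length; the logic is
// identical to the commented-out GenerateMD5CH10 above, with pass_length fixed at
// compile time for each instantiation.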
#define CREATE_MD5_CH_KERNEL(length) \
__global__ void GenerateMD5CH##length(unsigned char *CandidateHashes, uint32_t ThreadSpaceOffset, uint32_t StartStep, uint32_t StepsToRun) { \
uint32_t b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15; \
uint32_t a,b,c,d; \
uint32_t *InitialArray32, *OutputArray32; \
InitialArray32 = (uint32_t *)MD5_Candidate_Device_Hash; \
OutputArray32 = (uint32_t *)CandidateHashes; \
uint32_t i, chain_index, step_to_calculate, charset_offset, last_step_for_iteration; \
const int pass_length = length; \
__shared__ char charset[512]; \
copySingleCharsetToShared(charset, MD5_Candidate_Device_Charset_Constant); \
chain_index = ((blockIdx.x*blockDim.x + threadIdx.x) + (ThreadSpaceOffset * MD5_Candidate_Device_Number_Of_Threads)); \
if ((chain_index + StartStep) > MD5_Candidate_Device_Chain_Length) { \
return; \
} \
if (StartStep == 0) { \
a = InitialArray32[0]; \
b = InitialArray32[1]; \
c = InitialArray32[2]; \
d = InitialArray32[3]; \
} else { \
a = OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index]; \
b = OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index]; \
c = OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index]; \
d = OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index]; \
} \
step_to_calculate = chain_index + StartStep; \
charset_offset = step_to_calculate % MD5_Candidate_Device_Charset_Length; \
clearB0toB15(b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, step_to_calculate, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index); \
step_to_calculate++; \
charset_offset++; \
if (charset_offset >= MD5_Candidate_Device_Charset_Length) { \
charset_offset = 0; \
} \
if ((step_to_calculate + StepsToRun) > MD5_Candidate_Device_Chain_Length) { \
last_step_for_iteration = MD5_Candidate_Device_Chain_Length - 1; \
} else { \
last_step_for_iteration = (step_to_calculate + StepsToRun - 1); \
} \
for (i = step_to_calculate; i <= last_step_for_iteration; i++) { \
padMDHash(pass_length, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
CUDA_MD5(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d); \
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, i, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index); \
charset_offset++; \
if (charset_offset >= MD5_Candidate_Device_Charset_Length) { \
charset_offset = 0; \
} \
} \
OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index] = a; \
OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index] = b; \
OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index] = c; \
OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index] = d; \
}
CREATE_MD5_CH_KERNEL(6)
CREATE_MD5_CH_KERNEL(7)
CREATE_MD5_CH_KERNEL(8)
CREATE_MD5_CH_KERNEL(9)
CREATE_MD5_CH_KERNEL(10)
extern "C" void LaunchMD5CandidateHashKernel(int PasswordLength, int CUDA_Blocks, int CUDA_Threads,
unsigned char *DEVICE_End_Hashes, uint32_t ThreadSpaceOffset, uint32_t StartStep, uint32_t StepsToRun) {
switch (PasswordLength) {
case 6:
hipLaunchKernelGGL(( GenerateMD5CH6) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 7:
hipLaunchKernelGGL(( GenerateMD5CH7) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 8:
hipLaunchKernelGGL(( GenerateMD5CH8) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 9:
hipLaunchKernelGGL(( GenerateMD5CH9) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 10:
hipLaunchKernelGGL(( GenerateMD5CH10) , dim3(CUDA_Blocks), dim3(CUDA_Threads) , 0, 0,
DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
default:
printf("Password length %d not supported!", PasswordLength);
exit(1);
}
}
| 5472b05e8d83037bcf338242245c0734eb3a2afa.cu | // This is here so Netbeans doesn't error-spam my IDE
#if !defined(__CUDACC__)
// define the keywords, so that the IDE does not complain about them
#define __global__
#define __device__
#define __shared__
#define __constant__
#define blockIdx.x 1
#define blockDim.x 1
#define threadIdx.x 1
#endif
#include <cuda.h>
#include <cutil.h>
#include <cuda_runtime_api.h>
#include "../../inc/GRT_Common/GRTCommon.h"
// These will be the same for all GPUs working on a hash.
__device__ __constant__ unsigned char MD5_Candidate_Device_Charset_Constant[512]; // Constant space for charset
__device__ __constant__ uint32_t MD5_Candidate_Device_Charset_Length; // Character set length
__device__ __constant__ uint32_t MD5_Candidate_Device_Chain_Length; // May as well pull it from constant memory... faster when cached.
__device__ __constant__ uint32_t MD5_Candidate_Device_Table_Index;
__device__ __constant__ uint32_t MD5_Candidate_Device_Number_Of_Threads; // It needs this, and can't easily calculate it
// 4 32-bit words for MD5 hashes
__device__ __constant__ uint32_t MD5_Candidate_Device_Hash[4];
#include "../../inc/CUDA_Common/CUDA_MD5.h"
#include "../../inc/CUDA_Common/Hash_Common.h"
#include "../../inc/GRT_CUDA_device/CUDA_Reduction_Functions.h"
// Copy the shared variables to the host
extern "C" void copyMD5CandidateDataToConstant(char *hostCharset, uint32_t hostCharsetLength,
uint32_t hostChainLength, uint32_t hostTableIndex, uint32_t hostNumberOfThreads) {
CUDA_SAFE_CALL(cudaMemcpyToSymbol("MD5_Candidate_Device_Charset_Constant", hostCharset, 512));
CUDA_SAFE_CALL(cudaMemcpyToSymbol("MD5_Candidate_Device_Charset_Length", &hostCharsetLength, sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol("MD5_Candidate_Device_Chain_Length", &hostChainLength, sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol("MD5_Candidate_Device_Table_Index", &hostTableIndex, sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol("MD5_Candidate_Device_Number_Of_Threads", &hostNumberOfThreads, sizeof(uint32_t)));
}
extern "C" void copyMD5HashDataToConstant(unsigned char *hash) {
// Yes, I'm copying into a uint32_t array from an unsigned char array. This works, though, and it makes
// my life easier.
// For fuck's sake, Bitweasil, copy the HASH, not the address of the hash!
CUDA_SAFE_CALL(cudaMemcpyToSymbol(MD5_Candidate_Device_Hash, hash, 16 * sizeof(unsigned char)));
}
/*
__global__ void GenerateMD5CH10(unsigned char *CandidateHashes, uint32_t ThreadSpaceOffset, uint32_t StartStep, uint32_t StepsToRun) {
uint32_t b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15;
uint32_t a,b,c,d;
uint32_t *InitialArray32;
uint32_t *OutputArray32;
// 32-bit accesses to the hash arrays
InitialArray32 = (uint32_t *)MD5_Candidate_Device_Hash;
OutputArray32 = (uint32_t *)CandidateHashes;
uint32_t i, chain_index, step_to_calculate, charset_offset, last_step_for_iteration;
const int pass_length = 10;
__shared__ char charset[512];
// Generic "copy charset to shared memory" function
//copySingleCharsetToShared(charset);
copySingleCharsetToShared(charset, MD5_Candidate_Device_Charset_Constant);
// Figure out which chain we are working on.
chain_index = ((blockIdx.x*blockDim.x + threadIdx.x) + (ThreadSpaceOffset * MD5_Candidate_Device_Number_Of_Threads));
// Find out if we're done with work.
// If our index + the startstep is greater than the chain length, this thread has nothing to do.
if ((chain_index + StartStep) > MD5_Candidate_Device_Chain_Length) {
return;
}
// Load the initial hash. This will either be from the constant or from the storage space.
if (StartStep == 0) {
a = InitialArray32[0];
b = InitialArray32[1];
c = InitialArray32[2];
d = InitialArray32[3];
} else {
a = OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index];
b = OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index];
c = OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index];
d = OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index];
}
// Figure out which step we're running.
step_to_calculate = chain_index + StartStep;
// Yes, modulus here is slow. And this is a less-critical chunk of code. So it stays here.
charset_offset = step_to_calculate % MD5_Candidate_Device_Charset_Length;
clearB0toB15(b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, step_to_calculate, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index);
step_to_calculate++;
charset_offset++;
if (charset_offset >= MD5_Candidate_Device_Charset_Length) {
charset_offset = 0;
}
// Figure out the last step to run - either the chain length or
// the number of specified steps.
if ((step_to_calculate + StepsToRun) > MD5_Candidate_Device_Chain_Length) {
last_step_for_iteration = MD5_Candidate_Device_Chain_Length - 1;
} else {
last_step_for_iteration = (step_to_calculate + StepsToRun - 1); // Already run one
}
// We now have our (step+1) charset.
for (i = step_to_calculate; i <= last_step_for_iteration; i++) {
padMDHash(pass_length, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15);
CUDA_MD5(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d);
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, i, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index);
charset_offset++;
if (charset_offset >= MD5_Candidate_Device_Charset_Length) {
charset_offset = 0;
}
}
// Store the hash output.
OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index] = a;
OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index] = b;
OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index] = c;
OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index] = d;
}*/
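// The macro below stamps out one kernel per supported password length; the logic is
// identical to the commented-out GenerateMD5CH10 above, with pass_length fixed at
// compile time for each instantiation.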
#define CREATE_MD5_CH_KERNEL(length) \
__global__ void GenerateMD5CH##length(unsigned char *CandidateHashes, uint32_t ThreadSpaceOffset, uint32_t StartStep, uint32_t StepsToRun) { \
uint32_t b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15; \
uint32_t a,b,c,d; \
uint32_t *InitialArray32, *OutputArray32; \
InitialArray32 = (uint32_t *)MD5_Candidate_Device_Hash; \
OutputArray32 = (uint32_t *)CandidateHashes; \
uint32_t i, chain_index, step_to_calculate, charset_offset, last_step_for_iteration; \
const int pass_length = length; \
__shared__ char charset[512]; \
copySingleCharsetToShared(charset, MD5_Candidate_Device_Charset_Constant); \
chain_index = ((blockIdx.x*blockDim.x + threadIdx.x) + (ThreadSpaceOffset * MD5_Candidate_Device_Number_Of_Threads)); \
if ((chain_index + StartStep) > MD5_Candidate_Device_Chain_Length) { \
return; \
} \
if (StartStep == 0) { \
a = InitialArray32[0]; \
b = InitialArray32[1]; \
c = InitialArray32[2]; \
d = InitialArray32[3]; \
} else { \
a = OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index]; \
b = OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index]; \
c = OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index]; \
d = OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index]; \
} \
step_to_calculate = chain_index + StartStep; \
charset_offset = step_to_calculate % MD5_Candidate_Device_Charset_Length; \
clearB0toB15(b0,b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, step_to_calculate, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index); \
step_to_calculate++; \
charset_offset++; \
if (charset_offset >= MD5_Candidate_Device_Charset_Length) { \
charset_offset = 0; \
} \
if ((step_to_calculate + StepsToRun) > MD5_Candidate_Device_Chain_Length) { \
last_step_for_iteration = MD5_Candidate_Device_Chain_Length - 1; \
} else { \
last_step_for_iteration = (step_to_calculate + StepsToRun - 1); \
} \
for (i = step_to_calculate; i <= last_step_for_iteration; i++) { \
padMDHash(pass_length, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15); \
CUDA_MD5(b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, a, b, c, d); \
reduceSingleCharsetNormal(b0, b1, b2, a, b, c, d, i, charset, charset_offset, pass_length, MD5_Candidate_Device_Table_Index); \
charset_offset++; \
if (charset_offset >= MD5_Candidate_Device_Charset_Length) { \
charset_offset = 0; \
} \
} \
OutputArray32[0 * MD5_Candidate_Device_Chain_Length + chain_index] = a; \
OutputArray32[1 * MD5_Candidate_Device_Chain_Length + chain_index] = b; \
OutputArray32[2 * MD5_Candidate_Device_Chain_Length + chain_index] = c; \
OutputArray32[3 * MD5_Candidate_Device_Chain_Length + chain_index] = d; \
}
CREATE_MD5_CH_KERNEL(6)
CREATE_MD5_CH_KERNEL(7)
CREATE_MD5_CH_KERNEL(8)
CREATE_MD5_CH_KERNEL(9)
CREATE_MD5_CH_KERNEL(10)
extern "C" void LaunchMD5CandidateHashKernel(int PasswordLength, int CUDA_Blocks, int CUDA_Threads,
unsigned char *DEVICE_End_Hashes, uint32_t ThreadSpaceOffset, uint32_t StartStep, uint32_t StepsToRun) {
switch (PasswordLength) {
case 6:
GenerateMD5CH6 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 7:
GenerateMD5CH7 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 8:
GenerateMD5CH8 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 9:
GenerateMD5CH9 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
case 10:
GenerateMD5CH10 <<< CUDA_Blocks, CUDA_Threads >>>
(DEVICE_End_Hashes, ThreadSpaceOffset, StartStep, StepsToRun);
break;
default:
printf("Password length %d not supported!", PasswordLength);
exit(1);
}
}
|
e42ad2221e047233207df448470fa173b6e16334.hip | // !!! This is a file automatically generated by hipify!!!
// Simple ADD kernel to demonstrate the general pattern in C CUDA
// compile: nvcc -o add add.cu
#include "data_generator.h"
#include <hip/hip_runtime.h>
#include <iostream>
#define COUNT 100000
// KERNEL
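// Element-wise update: a[id] = a[id] * (b[id] + a[id] % b[id])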
__global__ void MainCUDAKernel(int *a, int *b) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
  // The grid is rounded up to a multiple of 1024 threads, so guard against out-of-range ids
  if (id < COUNT)
    a[id] *= b[id] + (a[id] % b[id]);
}
int main() {
srand(time(NULL));
// GENERATE & ALLOCATE DATA ON HOST
int dim = COUNT;
int size = sizeof(int) * dim;
int h_a[COUNT];
int h_b[COUNT];
populate(h_a, dim, false);
populate(h_b, dim, false);
// ALLOCATE DATA ON DEVICE AND COPY
int *d_a, *d_b;
if (hipMalloc(&d_a, size) != hipSuccess) {
std::cerr << "Failed: hipMalloc d_a" << std::endl;
return -1;
};
if (hipMalloc(&d_b, size) != hipSuccess) {
hipFree(d_a);
std::cerr << "Failed: hipMalloc d_b" << std::endl;
return -1;
};
if (hipMemcpy(d_a, &h_a, size, hipMemcpyHostToDevice) != hipSuccess) {
hipFree(d_a);
hipFree(d_b);
std::cerr << "Failed: hipMemcpy h_a" << std::endl;
return -1;
};
if (hipMemcpy(d_b, &h_b, size, hipMemcpyHostToDevice) != hipSuccess) {
hipFree(d_a);
hipFree(d_b);
std::cerr << "Failed: hipMemcpy h_b" << std::endl;
return -1;
};
// CALL KERNEL
// dim (number of threads) must be < 1024
hipLaunchKernelGGL(( MainCUDAKernel), dim3((dim / 1024) + 1), dim3(1024), 0, 0, d_a, d_b);
// OUTPUT
if (hipMemcpy(&h_a, d_a, size, hipMemcpyDeviceToHost) != hipSuccess) {
hipFree(d_a);
hipFree(d_b);
std::cerr << "Failed: hipMemcpy d_a " << std::endl;
return -1;
};
// for (int i = 0; i < dim; i++)
// std::cout << "a[" << i << "] = " << h_a[i] << std::endl;
// CLEANUP
hipFree(d_a);
hipFree(d_b);
return 0;
} | e42ad2221e047233207df448470fa173b6e16334.cu | // Simple ADD kernel to demonstrate the general pattern in C CUDA
// compile: nvcc -o add add.cu
#include "data_generator.h"
#include <cuda.h>
#include <iostream>
#define COUNT 100000
// KERNEL
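// Element-wise update: a[id] = a[id] * (b[id] + a[id] % b[id])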
__global__ void MainCUDAKernel(int *a, int *b) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
  // The grid is rounded up to a multiple of 1024 threads, so guard against out-of-range ids
  if (id < COUNT)
    a[id] *= b[id] + (a[id] % b[id]);
}
int main() {
srand(time(NULL));
// GENERATE & ALLOCATE DATA ON HOST
int dim = COUNT;
int size = sizeof(int) * dim;
int h_a[COUNT];
int h_b[COUNT];
populate(h_a, dim, false);
populate(h_b, dim, false);
// ALLOCATE DATA ON DEVICE AND COPY
int *d_a, *d_b;
if (cudaMalloc(&d_a, size) != cudaSuccess) {
std::cerr << "Failed: cudaMalloc d_a" << std::endl;
return -1;
};
if (cudaMalloc(&d_b, size) != cudaSuccess) {
cudaFree(d_a);
std::cerr << "Failed: cudaMalloc d_b" << std::endl;
return -1;
};
if (cudaMemcpy(d_a, &h_a, size, cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(d_a);
cudaFree(d_b);
std::cerr << "Failed: cudaMemcpy h_a" << std::endl;
return -1;
};
if (cudaMemcpy(d_b, &h_b, size, cudaMemcpyHostToDevice) != cudaSuccess) {
cudaFree(d_a);
cudaFree(d_b);
std::cerr << "Failed: cudaMemcpy h_b" << std::endl;
return -1;
};
// CALL KERNEL
// dim (number of threads) must be < 1024
MainCUDAKernel<<<(dim / 1024) + 1, 1024>>>(d_a, d_b);
// OUTPUT
if (cudaMemcpy(&h_a, d_a, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(d_a);
cudaFree(d_b);
std::cerr << "Failed: cudaMemcpy d_a " << std::endl;
return -1;
};
// for (int i = 0; i < dim; i++)
// std::cout << "a[" << i << "] = " << h_a[i] << std::endl;
// CLEANUP
cudaFree(d_a);
cudaFree(d_b);
return 0;
} |
a4d9971568d5e98d315f4a1730e3fe645ade7afb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <cmath>
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include <vector>
#include "BatchedNmsPlugin.h"
#include "./cuda_utils.h"
#include "macros.h"
#ifdef CUDA_11
#include <hipcub/hipcub.hpp>
#include <cub/iterator/counting_input_iterator.cuh>
#else
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh>
namespace cub = thrust::cuda_cub::cub;
#endif
namespace nvinfer1 {
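// Greedy NMS over score-sorted detections: thread i compares its box against every
// higher-scored box m of the same class and, depending on nms_method, either zeroes
// (hard NMS) or down-weights (linear / Gaussian soft-NMS) its score when the IoU
// exceeds the threshold.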
__global__ void batched_nms_kernel(
const int nms_method, const float threshold, const int num_detections,
const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_detections && m < i && scores[m] > 0.0f) {
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
if (mcls == icls) {
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1);
float h = max(0.0f, y2 - y1);
float iarea = (ibox.z - ibox.x) * (ibox.w - ibox.y);
float marea = (mbox.z - mbox.x) * (mbox.w - mbox.y);
float inter = w * h;
float overlap = inter / (iarea + marea - inter);
float sigma = 0.5; // this is an empirical value
// printf("nms_method: %d", nms_method);
//nms methods selection in the second stage
// 0: original nms
// 1: soft-nms (linear)
// 2: soft-nms (gaussian)
// printf("nms_method: ", nms_method);
switch (nms_method)
{
case 0:
if (overlap > threshold) {
scores[i] = 0.0f;
}
break;
case 1:
if (overlap > threshold) {
scores[i] = (1 - overlap) * scores[i];
}
break;
case 2:
if (overlap > threshold) {
scores[i] = ::exp(-(overlap * overlap) / sigma) * scores[i];
}
break;
default:
if (overlap > threshold) {
scores[i] = 0.0f;
}
break;
}
}
}
// Sync discarded detections
__syncthreads();
}
}
int batchedNms(int nms_method, int batch_size,
const void *const *inputs, void *TRT_CONST_ENQUEUE*outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, hipStream_t stream) {
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_sort = 0;
hipcub::DeviceRadixSort::SortPairsDescending(
static_cast<void*>(nullptr), temp_size_sort,
static_cast<float*>(nullptr),
static_cast<float*>(nullptr),
static_cast<int*>(nullptr),
static_cast<int*>(nullptr), count);
workspace_size += temp_size_sort;
return workspace_size;
}
auto on_stream = thrust::hip::par.on(stream);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
std::vector<int> indices_h(count);
for (int i = 0; i < count; i++)
indices_h[i] = i;
hipMemcpyAsync(indices, indices_h.data(), count * sizeof * indices, hipMemcpyHostToDevice, stream);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
// Sort scores and corresponding indices
int num_detections = count;
hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
in_scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores_sorted) * 8, stream);
        // Launch the actual NMS kernel - each thread handles one detection
        // TODO: different devices have different max threads
const int max_threads = 1024;
int num_per_thread = ceil(static_cast<float>(num_detections) / max_threads);
batched_nms_kernel << <num_per_thread, max_threads, 0, stream >> > (nms_method, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores_sorted, indices_sorted, indices,
num_detections, 0, sizeof(*scores_sorted) * 8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
hipMemcpyAsync(out_scores, scores_sorted, num_detections * sizeof *scores_sorted,
hipMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im) {
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
}
return 0;
}
} // namespace nvinfer1
| a4d9971568d5e98d315f4a1730e3fe645ade7afb.cu | #include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <cmath>
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include <vector>
#include "BatchedNmsPlugin.h"
#include "./cuda_utils.h"
#include "macros.h"
#ifdef CUDA_11
#include <cub/device/device_radix_sort.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#else
#include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh>
#include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh>
namespace cub = thrust::cuda_cub::cub;
#endif
namespace nvinfer1 {
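// Greedy NMS over score-sorted detections: thread i compares its box against every
// higher-scored box m of the same class and, depending on nms_method, either zeroes
// (hard NMS) or down-weights (linear / Gaussian soft-NMS) its score when the IoU
// exceeds the threshold.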
__global__ void batched_nms_kernel(
const int nms_method, const float threshold, const int num_detections,
const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_detections && m < i && scores[m] > 0.0f) {
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
if (mcls == icls) {
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1);
float h = max(0.0f, y2 - y1);
float iarea = (ibox.z - ibox.x) * (ibox.w - ibox.y);
float marea = (mbox.z - mbox.x) * (mbox.w - mbox.y);
float inter = w * h;
float overlap = inter / (iarea + marea - inter);
float sigma = 0.5; // this is an empirical value
// printf("nms_method: %d", nms_method);
//nms methods selection in the second stage
// 0: original nms
// 1: soft-nms (linear)
// 2: soft-nms (gaussian)
// printf("nms_method: ", nms_method);
switch (nms_method)
{
case 0:
if (overlap > threshold) {
scores[i] = 0.0f;
}
break;
case 1:
if (overlap > threshold) {
scores[i] = (1 - overlap) * scores[i];
}
break;
case 2:
if (overlap > threshold) {
scores[i] = std::exp(-(overlap * overlap) / sigma) * scores[i];
}
break;
default:
if (overlap > threshold) {
scores[i] = 0.0f;
}
break;
}
}
}
// Sync discarded detections
__syncthreads();
}
}
int batchedNms(int nms_method, int batch_size,
const void *const *inputs, void *TRT_CONST_ENQUEUE*outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, cudaStream_t stream) {
if (!workspace || !workspace_size) {
// Return required scratch space size cub style
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_sort = 0;
cub::DeviceRadixSort::SortPairsDescending(
static_cast<void*>(nullptr), temp_size_sort,
static_cast<float*>(nullptr),
static_cast<float*>(nullptr),
static_cast<int*>(nullptr),
static_cast<int*>(nullptr), count);
workspace_size += temp_size_sort;
return workspace_size;
}
auto on_stream = thrust::cuda::par.on(stream);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
std::vector<int> indices_h(count);
for (int i = 0; i < count; i++)
indices_h[i] = i;
cudaMemcpyAsync(indices, indices_h.data(), count * sizeof * indices, cudaMemcpyHostToDevice, stream);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++) {
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
// Sort scores and corresponding indices
int num_detections = count;
cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
in_scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores_sorted) * 8, stream);
        // Launch the actual NMS kernel - each thread handles one detection
        // TODO: different devices have different max threads
const int max_threads = 1024;
int num_per_thread = ceil(static_cast<float>(num_detections) / max_threads);
batched_nms_kernel << <num_per_thread, max_threads, 0, stream >> > (nms_method, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores_sorted, indices_sorted, indices,
num_detections, 0, sizeof(*scores_sorted) * 8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
cudaMemcpyAsync(out_scores, scores_sorted, num_detections * sizeof *scores_sorted,
cudaMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im) {
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
}
return 0;
}
} // namespace nvinfer1
|
a0ad8b8960564735600a72a6e5af5b00a3a767d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#include <random>
#include "linear_layer.hh"
#include "nn_exception.hh"
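// Forward pass: Z = W * A + b (b broadcast across columns); one thread per output
// element, where row indexes the output neuron and col indexes the batch sample.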
__global__ void linearLayerForward( float* W, float* A, float* Z, float* b,
int W_x_dim, int W_y_dim,
int A_x_dim, int A_y_dim) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int Z_x_dim = A_x_dim;
int Z_y_dim = W_y_dim;
float Z_value = 0;
if (row < Z_y_dim && col < Z_x_dim) {
for (int i = 0; i < W_x_dim; i++) {
Z_value += W[row * W_x_dim + i] * A[i * A_x_dim + col];
}
Z[row * Z_x_dim + col] = Z_value + b[row];
}
}
__global__ void linearLayerBackprop(float* W, float* dZ, float *dA,
int W_x_dim, int W_y_dim,
int dZ_x_dim, int dZ_y_dim) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// W is treated as transposed
int dA_x_dim = dZ_x_dim;
int dA_y_dim = W_x_dim;
float dA_value = 0.0f;
if (row < dA_y_dim && col < dA_x_dim) {
for (int i = 0; i < W_y_dim; i++) {
dA_value += W[i * W_x_dim + row] * dZ[i * dZ_x_dim + col];
}
dA[row * dA_x_dim + col] = dA_value;
}
}
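// Weight update: W -= learning_rate * (dZ * A^T) / batch_size; one thread per weight entry.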
__global__ void linearLayerUpdateWeights( float* dZ, float* A, float* W,
int dZ_x_dim, int dZ_y_dim,
int A_x_dim, int A_y_dim,
float learning_rate) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// A is treated as transposed
int W_x_dim = A_y_dim;
int W_y_dim = dZ_y_dim;
float dW_value = 0.0f;
if (row < W_y_dim && col < W_x_dim) {
for (int i = 0; i < dZ_x_dim; i++) {
dW_value += dZ[row * dZ_x_dim + i] * A[col * A_x_dim + i];
}
W[row * W_x_dim + col] = W[row * W_x_dim + col] - learning_rate * (dW_value / A_x_dim);
}
}
__global__ void linearLayerUpdateBias( float* dZ, float* b,
int dZ_x_dim, int dZ_y_dim,
int b_x_dim,
float learning_rate) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dZ_x_dim * dZ_y_dim) {
int dZ_x = index % dZ_x_dim;
int dZ_y = index / dZ_x_dim;
atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_y * dZ_x_dim + dZ_x] / dZ_x_dim));
}
}
LinearLayer::LinearLayer(std::string name, Shape W_shape) :
W(W_shape), b(W_shape.y, 1)
{
this->name = name;
b.allocateMemory();
W.allocateMemory();
initializeBiasWithZeros();
initializeWeightsRandomly();
}
LinearLayer::~LinearLayer()
{ }
void LinearLayer::initializeWeightsRandomly() {
std::default_random_engine generator;
std::normal_distribution<float> normal_distribution(0.0, 1.0);
for (int x = 0; x < W.shape.x; x++) {
for (int y = 0; y < W.shape.y; y++) {
W[y * W.shape.x + x] = normal_distribution(generator) * weights_init_threshold;
}
}
}
void LinearLayer::initializeBiasWithZeros() {
for (int x = 0; x < b.shape.x; x++) {
b[x] = 0;
}
}
Matrix& LinearLayer::forward(Matrix& A) {
assert(W.shape.x == A.shape.y);
this->A = A;
Shape Z_shape(A.shape.x, W.shape.y);
Z.allocateMemoryIfNotAllocated(Z_shape);
computeAndStoreLayerOutput(A);
NNException::throwIfDeviceErrorsOccurred("Cannot perform linear layer forward propagation.");
return Z;
}
void LinearLayer::computeAndStoreLayerOutput(Matrix& A) {
dim3 block_size(8, 8);
dim3 num_of_blocks( (Z.shape.x + block_size.x - 1) / block_size.x,
(Z.shape.y + block_size.y - 1) / block_size.y);
hipLaunchKernelGGL(( linearLayerForward), dim3(num_of_blocks), dim3(block_size), 0, 0, W.data.get(),
A.data.get(),
Z.data.get(),
b.data.get(),
W.shape.x, W.shape.y,
A.shape.x, A.shape.y);
}
Matrix& LinearLayer::backprop(Matrix& dZ, float learning_rate) {
dA.allocateMemoryIfNotAllocated(A.shape);
computeAndStoreBackpropError(dZ);
NNException::throwIfDeviceErrorsOccurred("Cannot perform back propagation.");
updateBias(dZ, learning_rate);
NNException::throwIfDeviceErrorsOccurred("Cannot perform bias update.");
updateWeights(dZ, learning_rate);
NNException::throwIfDeviceErrorsOccurred("Cannot perform weights update.");
return dA;
}
void LinearLayer::computeAndStoreBackpropError(Matrix& dZ) {
dim3 block_size(8, 8);
dim3 num_of_blocks( (A.shape.x + block_size.x - 1) / block_size.x,
(A.shape.y + block_size.y - 1) / block_size.y);
hipLaunchKernelGGL(( linearLayerBackprop), dim3(num_of_blocks), dim3(block_size), 0, 0, W.data.get(),
dZ.data.get(),
dA.data.get(),
W.shape.x, W.shape.y,
dZ.shape.x, dZ.shape.y);
}
void LinearLayer::updateWeights(Matrix& dZ, float learning_rate) {
dim3 block_size(8, 8);
dim3 num_of_blocks( (W.shape.x + block_size.x - 1) / block_size.x,
(W.shape.y + block_size.y - 1) / block_size.y);
hipLaunchKernelGGL(( linearLayerUpdateWeights), dim3(num_of_blocks), dim3(block_size), 0, 0, dZ.data.get(),
A.data.get(),
W.data.get(),
dZ.shape.x, dZ.shape.y,
A.shape.x, A.shape.y,
learning_rate);
}
void LinearLayer::updateBias(Matrix& dZ, float learning_rate) {
dim3 block_size(256);
dim3 num_of_blocks( (dZ.shape.y * dZ.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( linearLayerUpdateBias), dim3(num_of_blocks), dim3(block_size), 0, 0, dZ.data.get(),
b.data.get(),
dZ.shape.x, dZ.shape.y,
b.shape.x, learning_rate);
}
int LinearLayer::getXDim() const {
return W.shape.x;
}
int LinearLayer::getYDim() const {
return W.shape.y;
}
Matrix LinearLayer::getWeightsMatrix() const {
return W;
}
Matrix LinearLayer::getBiasVector() const {
return b;
}
| a0ad8b8960564735600a72a6e5af5b00a3a767d2.cu | #include <stdlib.h>
#include <assert.h>
#include <iostream>
#include <random>
#include "linear_layer.hh"
#include "nn_exception.hh"
__global__ void linearLayerForward( float* W, float* A, float* Z, float* b,
int W_x_dim, int W_y_dim,
int A_x_dim, int A_y_dim) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int Z_x_dim = A_x_dim;
int Z_y_dim = W_y_dim;
float Z_value = 0;
if (row < Z_y_dim && col < Z_x_dim) {
for (int i = 0; i < W_x_dim; i++) {
Z_value += W[row * W_x_dim + i] * A[i * A_x_dim + col];
}
Z[row * Z_x_dim + col] = Z_value + b[row];
}
}
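// Backward kernel w.r.t. the layer input: dA = W^T * dZ; W is indexed as if
// transposed instead of being transposed explicitly.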
__global__ void linearLayerBackprop(float* W, float* dZ, float *dA,
int W_x_dim, int W_y_dim,
int dZ_x_dim, int dZ_y_dim) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// W is treated as transposed
int dA_x_dim = dZ_x_dim;
int dA_y_dim = W_x_dim;
float dA_value = 0.0f;
if (row < dA_y_dim && col < dA_x_dim) {
for (int i = 0; i < W_y_dim; i++) {
dA_value += W[i * W_x_dim + row] * dZ[i * dZ_x_dim + col];
}
dA[row * dA_x_dim + col] = dA_value;
}
}
__global__ void linearLayerUpdateWeights( float* dZ, float* A, float* W,
int dZ_x_dim, int dZ_y_dim,
int A_x_dim, int A_y_dim,
float learning_rate) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// A is treated as transposed
int W_x_dim = A_y_dim;
int W_y_dim = dZ_y_dim;
float dW_value = 0.0f;
if (row < W_y_dim && col < W_x_dim) {
for (int i = 0; i < dZ_x_dim; i++) {
dW_value += dZ[row * dZ_x_dim + i] * A[col * A_x_dim + i];
}
W[row * W_x_dim + col] = W[row * W_x_dim + col] - learning_rate * (dW_value / A_x_dim);
}
}
__global__ void linearLayerUpdateBias( float* dZ, float* b,
int dZ_x_dim, int dZ_y_dim,
int b_x_dim,
float learning_rate) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < dZ_x_dim * dZ_y_dim) {
int dZ_x = index % dZ_x_dim;
int dZ_y = index / dZ_x_dim;
atomicAdd(&b[dZ_y], - learning_rate * (dZ[dZ_y * dZ_x_dim + dZ_x] / dZ_x_dim));
}
}
LinearLayer::LinearLayer(std::string name, Shape W_shape) :
W(W_shape), b(W_shape.y, 1)
{
this->name = name;
b.allocateMemory();
W.allocateMemory();
initializeBiasWithZeros();
initializeWeightsRandomly();
}
LinearLayer::~LinearLayer()
{ }
void LinearLayer::initializeWeightsRandomly() {
std::default_random_engine generator;
std::normal_distribution<float> normal_distribution(0.0, 1.0);
for (int x = 0; x < W.shape.x; x++) {
for (int y = 0; y < W.shape.y; y++) {
W[y * W.shape.x + x] = normal_distribution(generator) * weights_init_threshold;
}
}
}
void LinearLayer::initializeBiasWithZeros() {
for (int x = 0; x < b.shape.x; x++) {
b[x] = 0;
}
}
Matrix& LinearLayer::forward(Matrix& A) {
assert(W.shape.x == A.shape.y);
this->A = A;
Shape Z_shape(A.shape.x, W.shape.y);
Z.allocateMemoryIfNotAllocated(Z_shape);
computeAndStoreLayerOutput(A);
NNException::throwIfDeviceErrorsOccurred("Cannot perform linear layer forward propagation.");
return Z;
}
void LinearLayer::computeAndStoreLayerOutput(Matrix& A) {
dim3 block_size(8, 8);
dim3 num_of_blocks( (Z.shape.x + block_size.x - 1) / block_size.x,
(Z.shape.y + block_size.y - 1) / block_size.y);
linearLayerForward<<<num_of_blocks, block_size>>>( W.data.get(),
A.data.get(),
Z.data.get(),
b.data.get(),
W.shape.x, W.shape.y,
A.shape.x, A.shape.y);
}
Matrix& LinearLayer::backprop(Matrix& dZ, float learning_rate) {
dA.allocateMemoryIfNotAllocated(A.shape);
computeAndStoreBackpropError(dZ);
NNException::throwIfDeviceErrorsOccurred("Cannot perform back propagation.");
updateBias(dZ, learning_rate);
NNException::throwIfDeviceErrorsOccurred("Cannot perform bias update.");
updateWeights(dZ, learning_rate);
NNException::throwIfDeviceErrorsOccurred("Cannot perform weights update.");
return dA;
}
void LinearLayer::computeAndStoreBackpropError(Matrix& dZ) {
dim3 block_size(8, 8);
dim3 num_of_blocks( (A.shape.x + block_size.x - 1) / block_size.x,
(A.shape.y + block_size.y - 1) / block_size.y);
linearLayerBackprop<<<num_of_blocks, block_size>>>( W.data.get(),
dZ.data.get(),
dA.data.get(),
W.shape.x, W.shape.y,
dZ.shape.x, dZ.shape.y);
}
void LinearLayer::updateWeights(Matrix& dZ, float learning_rate) {
dim3 block_size(8, 8);
dim3 num_of_blocks( (W.shape.x + block_size.x - 1) / block_size.x,
(W.shape.y + block_size.y - 1) / block_size.y);
linearLayerUpdateWeights<<<num_of_blocks, block_size>>>(dZ.data.get(),
A.data.get(),
W.data.get(),
dZ.shape.x, dZ.shape.y,
A.shape.x, A.shape.y,
learning_rate);
}
void LinearLayer::updateBias(Matrix& dZ, float learning_rate) {
dim3 block_size(256);
dim3 num_of_blocks( (dZ.shape.y * dZ.shape.x + block_size.x - 1) / block_size.x);
linearLayerUpdateBias<<<num_of_blocks, block_size>>>(dZ.data.get(),
b.data.get(),
dZ.shape.x, dZ.shape.y,
b.shape.x, learning_rate);
}
int LinearLayer::getXDim() const {
return W.shape.x;
}
int LinearLayer::getYDim() const {
return W.shape.y;
}
Matrix LinearLayer::getWeightsMatrix() const {
return W;
}
Matrix LinearLayer::getBiasVector() const {
return b;
}
|
a0097d37eedb0a4a4ad7404b8021f7de7f72c2b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "PoolingLayer.h"
#include "CustomException.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 32
texture<float, 2> InputMatrixesRef;
texture<float, 2> GradientMatrixesRef;
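// Max-pooling forward kernel: each thread reduces one filter_size x filter_size
// window of its feature map, writes the maximum to the pooled output and leaves
// a 0/1 mask of the argmax position in prev_gradients for the backward pass.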
__global__ void cuda_pooling(float* result, float* prev_gradients, size_t gr_pitch, const int cols, const int rows, size_t fm_pitch, const int feature_map_cols, const int filter_size)
{
int block_x = blockDim.x * blockIdx.x + threadIdx.x;
int block_y = blockDim.y * blockIdx.y + threadIdx.y;
int block_z = blockDim.z * blockIdx.z + threadIdx.z;
int x = block_x * filter_size;
int y = block_y * filter_size;
if (x < cols && y < rows)
{
y *= cols;
int filter_upper_position = y + x;
int filter_right_border = __min(x + filter_size - 1, cols - 1) - x;
int filter_bottom_position = __min(filter_upper_position + cols * (filter_size - 1), x + cols * (rows - 1));
float* prev_gradients_start = (float*)((char*)prev_gradients + block_z * gr_pitch);
float max_val = _I32_MIN;
int max_i, max_j;
for (int i = filter_upper_position; i <= filter_bottom_position; i += cols)
{
for (int j = 0; j <= filter_right_border; j++)
{
float element = tex2D(InputMatrixesRef, i + j, block_z);
(element > max_val) ? (max_val = element, max_i = i, max_j = j) : (max_val);
prev_gradients_start[i + j] = 0;
}
}
prev_gradients_start[max_i + max_j] = 1.0f;
float* feature_map_matrix_start = (float*)((char*)result + block_z * fm_pitch);
int feature_map_position = block_y * feature_map_cols + block_x;
feature_map_matrix_start[feature_map_position] = max_val;
}
}
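// Max-pooling backward kernel: scales the 0/1 argmax mask left in
// prev_gradients by the incoming gradient of the corresponding pooled output,
// window by window.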
__global__ void cuda_generate_gradients(float* prev_gradients, size_t prev_gr_pitch, const int cols, const int rows, const int cur_gr_cols, const int filter_size)
{
int block_x = blockDim.x * blockIdx.x + threadIdx.x;
int block_y = blockDim.y * blockIdx.y + threadIdx.y;
int block_z = blockDim.z * blockIdx.z + threadIdx.z;
int x = block_x * filter_size;
int y = block_y * filter_size;
if (x < cols && y < rows)
{
y *= cols;
int filter_upper_position = y + x;
int filter_right_border = __min(x + filter_size - 1, cols - 1) - x;
int filter_bottom_position = __min(filter_upper_position + cols * (filter_size - 1), x + cols * (rows - 1));
float* prev_gradients_start = (float*)((char*)prev_gradients + block_z * prev_gr_pitch);
float element = tex2D(GradientMatrixesRef, block_y * cur_gr_cols + block_x, block_z);
for (int i = filter_upper_position; i <= filter_bottom_position; i += cols)
{
for (int j = 0; j <= filter_right_border; j++)
{
prev_gradients_start[i + j] *= element;
}
}
}
}
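// Each tensor is a single pitched 2D allocation: row z holds the flattened
// feature map z, addressed in the kernels as (char*)data + z * pitch.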
PoolingLayer::PoolingLayer(const int filter_size, const int outputs_size, const int outputs_depth) {
this->filter_size = filter_size;
gradients_device = Tensor(outputs_size, outputs_size, outputs_depth);
hipMallocPitch((void**)&gradients_device.data, &gradients_device.pitch, gradients_device.matrixes_size * sizeof(float), gradients_device.depth);
outputs_devices = Tensor(outputs_size, outputs_size, outputs_depth);
hipMallocPitch((void**)&outputs_devices.data, &outputs_devices.pitch, outputs_devices.matrixes_size * sizeof(float), outputs_devices.depth);
}
Tensor& PoolingLayer::forward(Tensor& input_matrixes, Tensor& prev_gradient_matrixes) {
inputs_device = input_matrixes;
hipBindTexture2D(0, InputMatrixesRef, inputs_device.data, InputMatrixesRef.channelDesc, inputs_device.matrixes_size, inputs_device.depth, inputs_device.pitch);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 blocksPerGrid = dim3(outputs_devices.cols_count / BLOCK_SIZE + (outputs_devices.cols_count % BLOCK_SIZE == 0 ? 0 : 1), outputs_devices.rows_count / BLOCK_SIZE + (outputs_devices.rows_count % BLOCK_SIZE == 0 ? 0 : 1), outputs_devices.depth);
cuda_pooling << <blocksPerGrid, threadsPerBlock >> > (outputs_devices.data, prev_gradient_matrixes.data, prev_gradient_matrixes.pitch, input_matrixes.cols_count, input_matrixes.rows_count, outputs_devices.pitch, outputs_devices.cols_count, filter_size);
cudacall(hipGetLastError());
hipUnbindTexture(InputMatrixesRef);
return outputs_devices;
}
void PoolingLayer::backward(Tensor& prev_gradient_matrixes) {
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 blocksPerGrid = dim3(gradients_device.cols_count / BLOCK_SIZE + (gradients_device.cols_count % BLOCK_SIZE == 0 ? 0 : 1), gradients_device.rows_count / BLOCK_SIZE + (gradients_device.rows_count % BLOCK_SIZE == 0 ? 0 : 1), gradients_device.depth);
hipBindTexture2D(0, GradientMatrixesRef, gradients_device.data, GradientMatrixesRef.channelDesc, gradients_device.matrixes_size, gradients_device.depth, gradients_device.pitch);
cuda_generate_gradients << <blocksPerGrid, threadsPerBlock >> > (prev_gradient_matrixes.data, prev_gradient_matrixes.pitch, prev_gradient_matrixes.cols_count, prev_gradient_matrixes.rows_count, gradients_device.cols_count, filter_size);
cudacall(hipGetLastError());
	hipUnbindTexture(GradientMatrixesRef); // unbind the texture bound in backward(), not the forward one
}
void PoolingLayer::freeMemory() {
hipFree(gradients_device.data);
hipFree(outputs_devices.data);
}
| a0097d37eedb0a4a4ad7404b8021f7de7f72c2b8.cu | #include <iostream>
#include "PoolingLayer.h"
#include "CustomException.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 32
texture<float, 2> InputMatrixesRef;
texture<float, 2> GradientMatrixesRef;
__global__ void cuda_pooling(float* result, float* prev_gradients, size_t gr_pitch, const int cols, const int rows, size_t fm_pitch, const int feature_map_cols, const int filter_size)
{
int block_x = blockDim.x * blockIdx.x + threadIdx.x;
int block_y = blockDim.y * blockIdx.y + threadIdx.y;
int block_z = blockDim.z * blockIdx.z + threadIdx.z;
int x = block_x * filter_size;
int y = block_y * filter_size;
if (x < cols && y < rows)
{
y *= cols;
int filter_upper_position = y + x;
int filter_right_border = __min(x + filter_size - 1, cols - 1) - x;
int filter_bottom_position = __min(filter_upper_position + cols * (filter_size - 1), x + cols * (rows - 1));
float* prev_gradients_start = (float*)((char*)prev_gradients + block_z * gr_pitch);
float max_val = _I32_MIN;
int max_i, max_j;
for (int i = filter_upper_position; i <= filter_bottom_position; i += cols)
{
for (int j = 0; j <= filter_right_border; j++)
{
float element = tex2D(InputMatrixesRef, i + j, block_z);
(element > max_val) ? (max_val = element, max_i = i, max_j = j) : (max_val);
prev_gradients_start[i + j] = 0;
}
}
prev_gradients_start[max_i + max_j] = 1.0f;
float* feature_map_matrix_start = (float*)((char*)result + block_z * fm_pitch);
int feature_map_position = block_y * feature_map_cols + block_x;
feature_map_matrix_start[feature_map_position] = max_val;
}
}
__global__ void cuda_generate_gradients(float* prev_gradients, size_t prev_gr_pitch, const int cols, const int rows, const int cur_gr_cols, const int filter_size)
{
int block_x = blockDim.x * blockIdx.x + threadIdx.x;
int block_y = blockDim.y * blockIdx.y + threadIdx.y;
int block_z = blockDim.z * blockIdx.z + threadIdx.z;
int x = block_x * filter_size;
int y = block_y * filter_size;
if (x < cols && y < rows)
{
y *= cols;
int filter_upper_position = y + x;
int filter_right_border = __min(x + filter_size - 1, cols - 1) - x;
int filter_bottom_position = __min(filter_upper_position + cols * (filter_size - 1), x + cols * (rows - 1));
float* prev_gradients_start = (float*)((char*)prev_gradients + block_z * prev_gr_pitch);
float element = tex2D(GradientMatrixesRef, block_y * cur_gr_cols + block_x, block_z);
for (int i = filter_upper_position; i <= filter_bottom_position; i += cols)
{
for (int j = 0; j <= filter_right_border; j++)
{
prev_gradients_start[i + j] *= element;
}
}
}
}
PoolingLayer::PoolingLayer(const int filter_size, const int outputs_size, const int outputs_depth) {
this->filter_size = filter_size;
gradients_device = Tensor(outputs_size, outputs_size, outputs_depth);
cudaMallocPitch((void**)&gradients_device.data, &gradients_device.pitch, gradients_device.matrixes_size * sizeof(float), gradients_device.depth);
outputs_devices = Tensor(outputs_size, outputs_size, outputs_depth);
cudaMallocPitch((void**)&outputs_devices.data, &outputs_devices.pitch, outputs_devices.matrixes_size * sizeof(float), outputs_devices.depth);
}
Tensor& PoolingLayer::forward(Tensor& input_matrixes, Tensor& prev_gradient_matrixes) {
inputs_device = input_matrixes;
cudaBindTexture2D(0, InputMatrixesRef, inputs_device.data, InputMatrixesRef.channelDesc, inputs_device.matrixes_size, inputs_device.depth, inputs_device.pitch);
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 blocksPerGrid = dim3(outputs_devices.cols_count / BLOCK_SIZE + (outputs_devices.cols_count % BLOCK_SIZE == 0 ? 0 : 1), outputs_devices.rows_count / BLOCK_SIZE + (outputs_devices.rows_count % BLOCK_SIZE == 0 ? 0 : 1), outputs_devices.depth);
cuda_pooling << <blocksPerGrid, threadsPerBlock >> > (outputs_devices.data, prev_gradient_matrixes.data, prev_gradient_matrixes.pitch, input_matrixes.cols_count, input_matrixes.rows_count, outputs_devices.pitch, outputs_devices.cols_count, filter_size);
cudacall(cudaGetLastError());
cudaUnbindTexture(InputMatrixesRef);
return outputs_devices;
}
void PoolingLayer::backward(Tensor& prev_gradient_matrixes) {
dim3 threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 blocksPerGrid = dim3(gradients_device.cols_count / BLOCK_SIZE + (gradients_device.cols_count % BLOCK_SIZE == 0 ? 0 : 1), gradients_device.rows_count / BLOCK_SIZE + (gradients_device.rows_count % BLOCK_SIZE == 0 ? 0 : 1), gradients_device.depth);
cudaBindTexture2D(0, GradientMatrixesRef, gradients_device.data, GradientMatrixesRef.channelDesc, gradients_device.matrixes_size, gradients_device.depth, gradients_device.pitch);
cuda_generate_gradients << <blocksPerGrid, threadsPerBlock >> > (prev_gradient_matrixes.data, prev_gradient_matrixes.pitch, prev_gradient_matrixes.cols_count, prev_gradient_matrixes.rows_count, gradients_device.cols_count, filter_size);
cudacall(cudaGetLastError());
	cudaUnbindTexture(GradientMatrixesRef); // unbind the texture bound in backward(), not the forward one
}
void PoolingLayer::freeMemory() {
cudaFree(gradients_device.data);
cudaFree(outputs_devices.data);
}
|
b8f086ef0a6f409ec16766865c42d4c95d81e341.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
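// Single-precision y = a*x + y: one thread per element, with a bounds check for
// the final, partially filled block.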
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 20 * (1 << 20);
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
  // Perform SAXPY on all N elements (N = 20 * 2^20, roughly 20M)
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| b8f086ef0a6f409ec16766865c42d4c95d81e341.cu | #include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 20 * (1 << 20);
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
  // Perform SAXPY on all N elements (N = 20 * 2^20, roughly 20M)
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
0f3159558d41902b4f7fd94508bd072a1e55c864.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FilterProtein.cuh"
double findKey(double b, map<double, int>& m){
double result = -1;
map<double, int>::iterator it;
for (it = m.begin(); it!= m.end(); it++){
if (fabs(b - it->first) < 0.015){
			result = it->first; // return the matching key (it->second is only the hit count)
break;
}
}
return result;
}
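// Builds a histogram of peak-minus-mass differences; a difference within
// 0.015 Da of an existing bin is merged into it, otherwise a new bin is opened.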
void calWeight(const vector<double>& peaks, const vector<double>& masses){
clock_t start = clock();
map<double, int> crossLine;
for (int i = 0; i < masses.size(); i++){
for (int j = 0; j < peaks.size(); j++){
double b = peaks[j] - masses[i];
double d = findKey(b, crossLine);
if (d==-1){
crossLine.insert(pair<double, int>(b, 1));
}
else{
crossLine[d]++;
}
}
}
clock_t end = clock();
cout << "():" << end - start << "ms" << endl;
}
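// Approximate convolution at resolution e: bins every peak-minus-mass
// difference into a histogram of width e and scores with the best sum of two
// adjacent bins.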
void approxConvolution(const vector<double>& a, const vector<double>& b, double e){
clock_t start = clock();
int n = a.size();
int m = b.size();
double ma = a[n - 1];
double mb = b[m - 1];
int max = (ma + mb) / e;
int score = 0;
vector<int> c(max+2, 0);
for (int i = 0; i < n; i++){
for (int j = 0; j < m; j++){
int d = (b[j] - a[i]+ma) / e;
c[d] = c[d] + 1;
}
}
for (int i = 0; i < max; i++){
int k = c[i] + c[i + 1];
if (k>score)
score = k;
}
clock_t end = clock();
//cout << "(approx1):" << end - start << "ms" << endl;
//cout << "score:" << score << endl;
}
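// Same idea as approxConvolution, but each difference increments two adjacent
// bins and the running maximum is tracked inside the loop, so no second pass
// over the histogram is needed.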
int approxConvolution2(const vector<double>& masses, const vector<double>& peaks, double e){
//clock_t start = clock();
int n = masses.size();
int m = peaks.size();
double mb = peaks[m - 1];
double ma = masses[n - 1];
int score = 0;
int max = (ma + mb) / e;
//vector<int> c(max+2, 0);
int *c = new int[max+2];
memset(c,0,(max+2)*sizeof(int));
for (int i = 0; i < n; i++){
for (int j = 0; j < m; j++){
int d = (peaks[j] - masses[i]+ma) / e;
//cout << "d::" << d <<" m-j:"<<m-j<< endl;
c[d] = c[d] + 1;
if (c[d]>score){
score = c[d];
}
d++;
c[d] = c[d] + 1;
if (c[d]>score){
score = c[d];
}
}
}
free(c);
return score;
//clock_t end = clock();
//std::cout << "(approx2):" << end - start << "ms" << endl;
//cout << "p:" << dmax << endl;
//std::cout << "method2 score:" << score << endl;
}
vector<int> restrictedConvolution(vector<double>& a, vector<double>& b, double p, double delta, double e){
int n = a.size();
int m = b.size();
double ma = a[n - 1];
double mb = b[m - 1];
int max = (ma + mb) / e;
vector<int> c(max+2, 0);
int i = 0, j = 0;
while (i < n&&j < m){
int d = b[j] - a[i];
if (d < p){
j++;
}
else{
i++;
if (d < p + delta)
c[(d + ma) / e]++;
}
}
return c;
}
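// Exact convolution: sorts all pairwise differences and, for each difference p,
// counts how many of the following values fall inside [p, p + e); the inner
// scan is capped at 50 neighbours.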
void exactConvolution(const vector<double>& a, const vector<double>& b, double e){
clock_t c_start = clock();
int n = a.size();
int m = b.size();
vector<double> c(m*n, 0);
for (int i = 0; i < n; i++){ //calculate the convolution array at resolution ee>e
for (int j = 0; j < m; j++){
c.push_back(b[j] - a[i]);
}
}
sort(c.begin(), c.end());
//cout << c[c.size()-1] << endl;
int score = 0;
int k = 0;
for (int i = 0; i < c.size(); i++){
k = 0;
double p = c[i];
if (p != 0){
for (int j = i + 1; j < c.size() && (j - i) < 50; j++){
if (c[j] < (p + e)){
k++;
//cout << c[i + j] << " " << c[i] + e << endl;
}
}
if (k>score){
score = k;
//cout << p << " " << score << endl;
}
}
}
clock_t c_end = clock();
//cout << "(exact:):" << c_end - c_start << "ms" << endl;
//cout << "exact score:" << score << endl;
}
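// Two-stage MS filter: stage 1 histograms all mass/peak differences at 1 Da
// resolution and keeps the diagonal bands with more than t hits; stage 2
// rescans only those narrow bands at the fine resolution e and scores them
// with a sliding window of two adjacent bins.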
int msFilter(const vector<double>& a, const vector<double>& b, double e){
//first convolution stage
double amino_acid_min = 75.06;
//clock_t c_start = clock();
int t = 5;
int n = a.size();
int m = b.size();
double ma = a[n - 1]; // maximum protein weight
double mb = b[m - 1]; //maximum spectrum
double ee = 1;
int maxee = (ma + mb) ;
vector<double> begin;
vector<double> end;
vector<double> diff;
//vector<int> index;
vector<int> tempd;
//vector<int> cc(maxee+2, 0);
int *cc = new int[maxee+2];
memset(cc,0,(maxee+2)*sizeof(int));
for (int i = 0; i < n; i++){ //calculate the convolution array at resolution ee>e
double protmass = a[i] + mb;
for (int j = 0; j < m; j++){
int d = protmass-b[j];
cc[d]++;
if (cc[d]>t){
//t = cc[d];
if (find(tempd.begin(), tempd.end(), d) == tempd.end()){
//index.push_back(j*n + i);
tempd.push_back(d);
diff.push_back( a[i]-b[j]);
}
}
}
}
//cout << "1Da num:" << index.size() << endl;
//stage2
//clock_t stage2s = clock();
sort(diff.begin(), diff.end());
for (int i = 0; i < diff.size(); i++){
if (i+1<diff.size()&&diff[i + 1] - diff[i]+2*ee < amino_acid_min){
begin.push_back(diff[i] - ee);
end.push_back(diff[i + 1] + ee);
i++;
}
else{
begin.push_back(diff[i] - ee);
end.push_back(diff[i] + ee);
}
}
t = 0;
//int max = (ma + mb) / e;
//vector<int> c(max+2, 0);
//int *c = new int[max+2];
//memset(c,0,(max+2)*sizeof(int));
//clock_t stage2e = clock();
for (int k = 0; k < begin.size(); k++){
int i = 0, j = 0;
double first = begin[k];
double last = end[k];
int limit = ((last - first) / e) + 2;
int *c = new int[limit];
memset(c, 0, (limit)*sizeof(int));
/*
if (k>0 && diff[index[k]] - diff[index[k - 1]] < ee){
p = diff[index[k - 1]];
delta = diff[index[k]] - diff[index[k - 1]];
}*/
//cout << diff[index[k]] << " ";
while (i < n&&j < m){
double d = a[i] - b[j];
if (d >= last){
j++;
}
else{
i++;
if (d>first){
// here important
int indexd = (d - first) / e;
c[indexd] ++;
/*
if (c[indexd]>t){
t = c[indexd];
}
indexd++;
c[indexd] ++;
if (c[indexd] > t){
t = c[indexd];
}
*/
}
}
}
int sum = c[limit - 1]+c[limit-2];
int pos = limit - 3;
while (pos >= 0){
sum += c[pos];
sum -= c[pos + 2];
pos--;
if (sum > t)
t = sum + 1;
}
free(c);
}
free(cc);
return t;
//return t;
//clock_t c_end = clock();
//cout << "stage2:" << stage2e - stage2s << endl;
//cout << "(msFilter:):" << c_end - c_start << "ms" << endl;
//cout << "msFilter score:" << t << endl;
//cout << "score:" << score << endl;
}
void msFilterOneSpectra(const vector<vector<double>>& a, const vector<double>& b, double e){
//first convolution stage
double amino_acid_min = 75.06;
//clock_t c_start = clock();
for (int idx_mass = 0; idx_mass < a.size(); idx_mass++){
int t = 5;
int n = a[idx_mass].size();
int m = b.size();
double ma = a[idx_mass][n - 1];
double mb = b[m - 1];
double ee = 1;
int maxee = (ma + mb) / ee;
vector<double> begin;
vector<double> end;
vector<double> diff;
//vector<int> index;
vector<int> tempd;
//vector<int> cc(maxee+2, 0);
int *cc = new int[maxee + 2];
memset(cc, 0, (maxee + 2)*sizeof(int));
for (int i = 0; i < n; i++){ //calculate the convolution array at resolution ee>e
for (int j = 0; j < m; j++){
int d = (b[j] - a[idx_mass][i] + ma) / ee;
cc[d] = cc[d] + 1;
if (cc[d]>t){
t = cc[d];
if (find(tempd.begin(), tempd.end(), d) == tempd.end()){
//index.push_back(j*n + i);
tempd.push_back(d);
diff.push_back(b[j] - a[idx_mass][i]);
}
}
}
}
//cout << "1Da num:" << index.size() << endl;
//stage2
clock_t stage2s = clock();
sort(diff.begin(), diff.end());
for (int i = 0; i < diff.size(); i++){
if (i + 1 < diff.size() && diff[i + 1] - diff[i] + 2 * ee < amino_acid_min){
begin.push_back(diff[i] - ee);
end.push_back(diff[i + 1] + ee);
i++;
}
else{
begin.push_back(diff[i] - ee);
end.push_back(diff[i] + ee);
}
}
t = 0;
int max = (ma + mb) / e;
//vector<int> c(max+2, 0);
int *c = new int[max + 2];
memset(c, 0, (max + 2)*sizeof(int));
//clock_t stage2e = clock();
for (int k = 0; k < begin.size(); k++){
int i = 0, j = 0;
double first = begin[k];
double last = end[k];
/*
if (k>0 && diff[index[k]] - diff[index[k - 1]] < ee){
p = diff[index[k - 1]];
delta = diff[index[k]] - diff[index[k - 1]];
}*/
//cout << diff[index[k]] << " ";
while (i < n&&j < m){
double d = b[j] - a[idx_mass][i];
if (d < first){
j++;
}
else{
i++;
if (d < last){
// cout << d << endl;
int indexd = (d + ma) / e;
c[indexd] = c[indexd] + 1;;
if (c[indexd]>t){
t = c[indexd];
}
indexd++;
c[indexd] = c[indexd] + 1;
if (c[indexd] > t){
t = c[indexd];
}
}
}
}
}
free(cc);
free(c);
//return t;
//clock_t c_end = clock();
//cout << "stage2:" << stage2e - stage2s << endl;
//cout << "(msFilter stage1:):" << c_end - c_start << "ms" << endl;
//cout << "cpu msFilter:" << t << endl;
//cout << "score:" << score << endl;
}
}
/*
void filterProtein(vector<vector<mass_t>>& cutMasses,vector<MonoSpectrum>& spectrum,vector<vector<mass_t>>& cutPeaksMonoMass){
clock_t msfilter_start = clock();
char *path = "output\\time_result.txt";
ofstream out(path);
int limit = 800;
//double parentMass2 = spectrum[2].getParentMonoMass();
int spectraSize = min(spectrum.size(), cutPeaksMonoMass.size());
vector<double> parentMasses;
for (int i = 0; i < spectraSize ; i++){
parentMasses.push_back(spectrum[i].getParentMonoMass());
}
vector<vector<int>> index(spectraSize );
int filterAllSize = 0;
for (int i = 0; i < spectraSize ; i++){
for (int j = 0; j < cutMasses.size(); j++){
if (fabs(parentMasses[i] - cutMasses[j][cutMasses[j].size() - 1]) < limit)
index[i].push_back(j);
}
filterAllSize += index[i].size();
}
out << ":" << filterAllSize / spectraSize << endl;
for (int i = 0; i<spectraSize ; i++){
for (int j = 0; j < index[i].size(); j++){
approxConvolution2(cutMasses[index[i][j]], cutPeaksMonoMass[i], 0.015);
}
}
Config config;
config.init_with_defaults();
vector<PTM> list = config.getAllPTMs();
vector<mass_t> ptm;
for (int j = 0; j < list.size(); j++){
ptm.push_back(list[j].delta);
}
thrust::device_vector<double> ddelta = ptm;
clock_t msfilter_end = clock();
out << "ms-filter time:" << msfilter_end - msfilter_start << "ms" << endl;
out<<"ms-filter time:" << (msfilter_end - msfilter_start)/60000 << "min" << endl;
//cout << ":"<<index.size() << endl;
}
*/
void testTime(vector<vector<mass_t>>& cutMasses, vector<vector<mass_t>>& cutPeaksMonoMass){
int proteinSize = cutMasses.size();
double e = 0.2;
clock_t begin_exact = clock();
for (int i = 0; i < proteinSize; i++){
exactConvolution(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_exact = clock();
cout << "exact time:" << end_exact - begin_exact << "ms" << endl;
clock_t begin_app = clock();
for (int i = 0; i < proteinSize; i++){
approxConvolution(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_app = clock();
cout << "approx time:" << end_app - begin_app<< "ms" << endl;
clock_t begin_app2 = clock();
for (int i = 0; i < proteinSize; i++){
approxConvolution2(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_app2 = clock();
cout << "approx2 time:" << end_app2 - begin_app2<< "ms" << endl;
clock_t begin_ms = clock();
for (int i = 0; i < proteinSize; i++){
msFilter(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_ms = clock();
cout << "ms-filter time:" << end_ms - begin_ms<< "ms" << endl;
}
/*
void gpuFilter(vector<vector<mass_t>>& cutMasses,vector<vector<mass_t>>& cutPeaksMonoMass){
thrust::host_vector<double> host_cutPeaks0;
for (int i = 0; i < cutPeaksMonoMass[0].size(); i++){
host_cutPeaks0.push_back(cutPeaksMonoMass[0][i]);
}
thrust::host_vector<int> cutMass_begin;
thrust::host_vector<int> cutMass_end;
thrust::host_vector<double> cutMassesTemp;
thrust::device_vector<double> device_cutPeaks0 = host_cutPeaks0;
int index = 0;
for (int i = 0; i < cutMasses.size(); i++){
cutMass_begin.push_back(index);
for (int j = 0; j < cutMasses[i].size(); j++){
cutMassesTemp.push_back(cutMasses[i][j]);
}
index += cutMasses[i].size() - 1;
cutMass_end.push_back(index);
index++;
}
thrust::device_vector<int> device_cutMass_begin(cutMass_begin.size());
thrust::device_vector<int> device_cutMass_end(cutMass_end.size());
thrust::device_vector<double> device_cutMasses(cutMassesTemp.size());
device_cutMass_begin = cutMass_begin;
device_cutMass_end = cutMass_end;
device_cutMasses = cutMassesTemp;
char *path = "output\\time_result.txt";
ofstream out(path);
//cout << cutMassesTemp[cutMass_end[6]] << " " << host_cutPeaks0[host_cutPeaks0.size() - 1] << endl;
//cout << "5 test:"<<(cutMassesTemp[cutMass_end[6]]+ host_cutPeaks0[host_cutPeaks0.size()-1])/0.015<< endl;
out << "device_cutMass_begin.size() " << device_cutMass_begin.size() << endl;
out << "device_cutMass_end.size() " << device_cutMass_end.size() << endl;
out << "device_cutMasses.size() " << device_cutMasses.size() << endl;
out << "device_cutPeaks0.size() " << device_cutPeaks0.size() << endl;
clock_t dev_start = clock();
//hipEvent_t device_start0, device_stop0;
//hipEventCreate(&device_start0);
//hipEventCreate(&device_stop0);
//hipEventRecord(device_start0, NULL);
//cout << "here" << endl;
//testt << <1, 1 >> >();
//test protein filter GPU
//thrust::device_vector<double> device_cutMass2 = cutMasses[2];
hipDeviceSetLimit(hipLimitMallocHeapSize, 100 * 1024*1024);
//gpuFilter << <1, 3 >> >();
//gpuFilter << <1, 1 >> >(thrust::raw_pointer_cast(&device_cutMass_begin[0]), thrust::raw_pointer_cast(&device_cutMass_end[0]), thrust::raw_pointer_cast(&device_cutMasses[0]), thrust::raw_pointer_cast(&device_cutPeaks0[0]), device_cutPeaks0.size(),proteins.size());
//gpuApproxConvolution<<<1,1>>>(thrust::raw_pointer_cast(&device_cutMass_begin[0]), thrust::raw_pointer_cast(&device_cutMass_end[0]), thrust::raw_pointer_cast(&device_cutMasses[0]), thrust::raw_pointer_cast(&device_cutPeaks0[0]), device_cutPeaks0.size(),proteins.size());
//hipSetDeviceFlags(hipDeviceMapHost);
//const int fi = 1;
//hipStream_t stream[fi];
//for (int i = 0; i < fi; i++){
// hipStreamCreate(&stream[i]);
for (int j = 0; j < 10; j++){
int testIndex = j;
int ma_size = cutMasses[testIndex].size();
int mb_size = cutPeaksMonoMass[0].size();
thrust::device_vector<double> cutMassTest(ma_size);
cutMassTest = cutMasses[testIndex];
thrust::device_vector<double> cutPeakTest(mb_size);
cutPeakTest = cutPeaksMonoMass[0];
double ma = cutMasses[testIndex][ma_size - 1];
double mb = cutPeaksMonoMass[0][mb_size - 1];
int num = ma_size*mb_size;
int max = ((ma + mb) / 0.015) + 2;
int *c;
//CUDA_CALL(hipHostMalloc((void**)&c, max*sizeof(int), hipHostMallocWriteCombined | hipHostMallocMapped));
hipMalloc((void**)&c, sizeof(int)* max);
int *dev_score;
hipMalloc((void**)&dev_score, sizeof(int)* 1);
//int *dev_c;
int score[1] = { 0 };
// int *hc=new int[max];
hipMemcpy(dev_score, score, sizeof(int), hipMemcpyHostToDevice);
//CUDA_CALL(hipHostGetDevicePointer(&dev_c, c, 0));
//hipMemcpy(gpudata, data, sizeof(int) * DATA_SIZE,hipMemcpyHostToDevice);
calScore << <num / 256+1, 256 >> >(thrust::raw_pointer_cast(&cutMassTest[0]), thrust::raw_pointer_cast(&cutPeakTest[0]), ma_size, mb_size, ma, mb, 0.015, c, num, dev_score, max);
//hipDeviceSynchronize();
hipMemcpy(score, dev_score, sizeof(int), hipMemcpyDeviceToHost);
//hipMemcpy(hc, c, sizeof(int)*max, hipMemcpyDeviceToHost);
//int *tempresult = max_element(hc, hc + max);
cout << ":" << score[0] << endl;
//cout << ":" << testIndex << endl;
hipHostFree(c);
hipHostFree(dev_score);
}
//}
//for (int i = 0; i < fi;i++)
// hipStreamDestroy(stream[i]);
clock_t dev_end = clock();
//hipEventRecord(device_stop0, NULL);
//hipEventSynchronize(device_start0);
//hipEventSynchronize(device_stop0);
//float msecTotal0 = 0.0f;
//hipEventElapsedTime(&msecTotal0, device_start0, device_stop0);
out << " Time GPU protein filter:" << dev_end-dev_start << "ms"<<endl;
}
__global__ void msFiltergpu(int* cutMass_begin, int* cutMass_end, double* a,double* b,int m){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
//first convolution stage
double amino_acid_min = 75.06;
//clock_t c_start = clock();
int t = 8;
int index_start = cutMass_begin[idx];
int index_end = cutMass_end[idx];
int n = index_end-index_start+1;
double ma = a[index_end];
double mb = b[m - 1];
double ee = 1;
int maxee = (ma + mb) / ee;
thrust::device_vector<double> begin;
thrust::device_vector<double> end;
thrust::device_vector<double> diff;
//vector<int> index;
thrust::device_vector<int> tempd;
thrust::device_vector<int> cc(maxee+2, 0);
for (int i = index_start; i < index_end+1; i++){ //calculate the convolution array at resolution ee>e
for (int j = 0; j < m; j++){
int d = (b[j] - a[i]+ma) / ee;
cc[d] = cc[d] + 1;
if (cc[d]>t){
//find function
if (!findVal(thrust::raw_pointer_cast(&tempd[0]),d,tempd.size())){
//index.push_back(j*n + i);
tempd.push_back(d);
diff.push_back( b[j] - a[i]);
}
}
}
}
//cout << "1Da num:" << index.size() << endl;
//stage2
thrust::sort(diff.begin(), diff.end());
double e = 0.015;
for (int i = 0; i < diff.size(); i++){
if (i+1<diff.size()&&diff[i + 1] - diff[i]+2*ee < amino_acid_min){
begin.push_back(diff[i] - ee);
end.push_back(diff[i + 1] + ee);
i++;
}
else{
begin.push_back(diff[i] - ee);
end.push_back(diff[i] + ee);
}
}
t = 0;
int max = (ma + mb) / e;
thrust::device_vector<int> c(max+2, 0);
for (int k= 0; k < begin.size(); k++){
int i = 0, j = 0;
double first = begin[k];
double last = end[k];
//cout << diff[index[k]] << " ";
while (i < n&&j < m){
double d = b[j] - a[index_start+i];
if (d < first){
j++;
}
else{
i++;
if (d < last){
// cout << d << endl;
int indexd = (d + ma) / e;
c[indexd] = c[indexd] + 1;;
if (c[indexd]>t){
t = c[indexd];
}
indexd++;
c[indexd]=c[indexd]+1;
if (c[indexd] > t){
t = c[indexd];
}
}
}
}
}
printf("gpu msFilter: %d", t);
}
__global__ void testt(){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
printf("idx: %d", idx);
}
*/ | 0f3159558d41902b4f7fd94508bd072a1e55c864.cu | #include "FilterProtein.cuh"
double findKey(double b, map<double, int>& m){
double result = -1;
map<double, int>::iterator it;
for (it = m.begin(); it!= m.end(); it++){
if (fabs(b - it->first) < 0.015){
			result = it->first; // return the matching key (it->second is only the hit count)
break;
}
}
return result;
}
void calWeight(const vector<double>& peaks, const vector<double>& masses){
clock_t start = clock();
map<double, int> crossLine;
for (int i = 0; i < masses.size(); i++){
for (int j = 0; j < peaks.size(); j++){
double b = peaks[j] - masses[i];
double d = findKey(b, crossLine);
if (d==-1){
crossLine.insert(pair<double, int>(b, 1));
}
else{
crossLine[d]++;
}
}
}
clock_t end = clock();
cout << "一次计算分数时间(自己的版本):" << end - start << "ms" << endl;
}
void approxConvolution(const vector<double>& a, const vector<double>& b, double e){
clock_t start = clock();
int n = a.size();
int m = b.size();
double ma = a[n - 1];
double mb = b[m - 1];
int max = (ma + mb) / e;
int score = 0;
vector<int> c(max+2, 0);
for (int i = 0; i < n; i++){
for (int j = 0; j < m; j++){
int d = (b[j] - a[i]+ma) / e;
c[d] = c[d] + 1;
}
}
for (int i = 0; i < max; i++){
int k = c[i] + c[i + 1];
if (k>score)
score = k;
}
clock_t end = clock();
//cout << "一次计算分数时间(approx版本1):" << end - start << "ms" << endl;
//cout << "score:" << score << endl;
}
int approxConvolution2(const vector<double>& masses, const vector<double>& peaks, double e){
//clock_t start = clock();
int n = masses.size();
int m = peaks.size();
double mb = peaks[m - 1];
double ma = masses[n - 1];
int score = 0;
int max = (ma + mb) / e;
//vector<int> c(max+2, 0);
int *c = new int[max+2];
memset(c,0,(max+2)*sizeof(int));
for (int i = 0; i < n; i++){
for (int j = 0; j < m; j++){
int d = (peaks[j] - masses[i]+ma) / e;
//cout << "d::" << d <<" m-j:"<<m-j<< endl;
c[d] = c[d] + 1;
if (c[d]>score){
score = c[d];
}
d++;
c[d] = c[d] + 1;
if (c[d]>score){
score = c[d];
}
}
}
free(c);
return score;
//clock_t end = clock();
//std::cout << "一次计算分数时间(approx版本2):" << end - start << "ms" << endl;
//cout << "p值:" << dmax << endl;
//std::cout << "method2 score:" << score << endl;
}
vector<int> restrictedConvolution(vector<double>& a, vector<double>& b, double p, double delta, double e){
int n = a.size();
int m = b.size();
double ma = a[n - 1];
double mb = b[m - 1];
int max = (ma + mb) / e;
vector<int> c(max+2, 0);
int i = 0, j = 0;
while (i < n&&j < m){
int d = b[j] - a[i];
if (d < p){
j++;
}
else{
i++;
if (d < p + delta)
c[(d + ma) / e]++;
}
}
return c;
}
void exactConvolution(const vector<double>& a, const vector<double>& b, double e){
clock_t c_start = clock();
int n = a.size();
int m = b.size();
vector<double> c(m*n, 0);
for (int i = 0; i < n; i++){ //calculate the convolution array at resolution ee>e
for (int j = 0; j < m; j++){
c.push_back(b[j] - a[i]);
}
}
sort(c.begin(), c.end());
//cout << c[c.size()-1] << endl;
int score = 0;
int k = 0;
for (int i = 0; i < c.size(); i++){
k = 0;
double p = c[i];
if (p != 0){
for (int j = i + 1; j < c.size() && (j - i) < 50; j++){
if (c[j] < (p + e)){
k++;
//cout << c[i + j] << " " << c[i] + e << endl;
}
}
if (k>score){
score = k;
//cout << p << " " << score << endl;
}
}
}
clock_t c_end = clock();
//cout << "一次计算分数时间(exact:):" << c_end - c_start << "ms" << endl;
//cout << "exact score:" << score << endl;
}
int msFilter(const vector<double>& a, const vector<double>& b, double e){
//first convolution stage
double amino_acid_min = 75.06;
//clock_t c_start = clock();
int t = 5;
int n = a.size();
int m = b.size();
double ma = a[n - 1]; // maximum protein weight
double mb = b[m - 1]; //maximum spectrum
double ee = 1;
int maxee = (ma + mb) ;
vector<double> begin;
vector<double> end;
vector<double> diff;
//vector<int> index;
vector<int> tempd;
//vector<int> cc(maxee+2, 0);
int *cc = new int[maxee+2];
memset(cc,0,(maxee+2)*sizeof(int));
for (int i = 0; i < n; i++){ //calculate the convolution array at resolution ee>e
double protmass = a[i] + mb;
for (int j = 0; j < m; j++){
int d = protmass-b[j];
cc[d]++;
if (cc[d]>t){
//t = cc[d];
if (find(tempd.begin(), tempd.end(), d) == tempd.end()){
//index.push_back(j*n + i);
tempd.push_back(d);
diff.push_back( a[i]-b[j]);
}
}
}
}
//cout << "1Da num:" << index.size() << endl;
//stage2
//clock_t stage2s = clock();
sort(diff.begin(), diff.end());
for (int i = 0; i < diff.size(); i++){
if (i+1<diff.size()&&diff[i + 1] - diff[i]+2*ee < amino_acid_min){
begin.push_back(diff[i] - ee);
end.push_back(diff[i + 1] + ee);
i++;
}
else{
begin.push_back(diff[i] - ee);
end.push_back(diff[i] + ee);
}
}
t = 0;
//int max = (ma + mb) / e;
//vector<int> c(max+2, 0);
//int *c = new int[max+2];
//memset(c,0,(max+2)*sizeof(int));
//clock_t stage2e = clock();
for (int k = 0; k < begin.size(); k++){
int i = 0, j = 0;
double first = begin[k];
double last = end[k];
int limit = ((last - first) / e) + 2;
int *c = new int[limit];
memset(c, 0, (limit)*sizeof(int));
/*
if (k>0 && diff[index[k]] - diff[index[k - 1]] < ee){
p = diff[index[k - 1]];
delta = diff[index[k]] - diff[index[k - 1]];
}*/
//cout << diff[index[k]] << " ";
while (i < n&&j < m){
double d = a[i] - b[j];
if (d >= last){
j++;
}
else{
i++;
if (d>first){
// here important
int indexd = (d - first) / e;
c[indexd] ++;
/*
if (c[indexd]>t){
t = c[indexd];
}
indexd++;
c[indexd] ++;
if (c[indexd] > t){
t = c[indexd];
}
*/
}
}
}
int sum = c[limit - 1]+c[limit-2];
int pos = limit - 3;
while (pos >= 0){
sum += c[pos];
sum -= c[pos + 2];
pos--;
if (sum > t)
t = sum + 1;
}
free(c);
}
free(cc);
return t;
//return t;
//clock_t c_end = clock();
//cout << "stage2:" << stage2e - stage2s << endl;
//cout << "一次计算分数时间(msFilter:):" << c_end - c_start << "ms" << endl;
//cout << "msFilter score:" << t << endl;
//cout << "score:" << score << endl;
}
void msFilterOneSpectra(const vector<vector<double>>& a, const vector<double>& b, double e){
//first convolution stage
double amino_acid_min = 75.06;
//clock_t c_start = clock();
for (int idx_mass = 0; idx_mass < a.size(); idx_mass++){
int t = 5;
int n = a[idx_mass].size();
int m = b.size();
double ma = a[idx_mass][n - 1];
double mb = b[m - 1];
double ee = 1;
int maxee = (ma + mb) / ee;
vector<double> begin;
vector<double> end;
vector<double> diff;
//vector<int> index;
vector<int> tempd;
//vector<int> cc(maxee+2, 0);
int *cc = new int[maxee + 2];
memset(cc, 0, (maxee + 2)*sizeof(int));
for (int i = 0; i < n; i++){ //calculate the convolution array at resolution ee>e
for (int j = 0; j < m; j++){
int d = (b[j] - a[idx_mass][i] + ma) / ee;
cc[d] = cc[d] + 1;
if (cc[d]>t){
t = cc[d];
if (find(tempd.begin(), tempd.end(), d) == tempd.end()){
//index.push_back(j*n + i);
tempd.push_back(d);
diff.push_back(b[j] - a[idx_mass][i]);
}
}
}
}
//cout << "1Da num:" << index.size() << endl;
//stage2
clock_t stage2s = clock();
sort(diff.begin(), diff.end());
for (int i = 0; i < diff.size(); i++){
if (i + 1 < diff.size() && diff[i + 1] - diff[i] + 2 * ee < amino_acid_min){
begin.push_back(diff[i] - ee);
end.push_back(diff[i + 1] + ee);
i++;
}
else{
begin.push_back(diff[i] - ee);
end.push_back(diff[i] + ee);
}
}
t = 0;
int max = (ma + mb) / e;
//vector<int> c(max+2, 0);
int *c = new int[max + 2];
memset(c, 0, (max + 2)*sizeof(int));
//clock_t stage2e = clock();
for (int k = 0; k < begin.size(); k++){
int i = 0, j = 0;
double first = begin[k];
double last = end[k];
/*
if (k>0 && diff[index[k]] - diff[index[k - 1]] < ee){
p = diff[index[k - 1]];
delta = diff[index[k]] - diff[index[k - 1]];
}*/
//cout << diff[index[k]] << " ";
while (i < n&&j < m){
double d = b[j] - a[idx_mass][i];
if (d < first){
j++;
}
else{
i++;
if (d < last){
// cout << d << endl;
int indexd = (d + ma) / e;
c[indexd] = c[indexd] + 1;;
if (c[indexd]>t){
t = c[indexd];
}
indexd++;
c[indexd] = c[indexd] + 1;
if (c[indexd] > t){
t = c[indexd];
}
}
}
}
}
free(cc);
free(c);
//return t;
//clock_t c_end = clock();
//cout << "stage2:" << stage2e - stage2s << endl;
//cout << "一次计算分数时间(msFilter stage1:):" << c_end - c_start << "ms" << endl;
//cout << "cpu msFilter:" << t << endl;
//cout << "score:" << score << endl;
}
}
/*
void filterProtein(vector<vector<mass_t>>& cutMasses,vector<MonoSpectrum>& spectrum,vector<vector<mass_t>>& cutPeaksMonoMass){
clock_t msfilter_start = clock();
char *path = "output\\time_result.txt";
ofstream out(path);
int limit = 800;
//double parentMass2 = spectrum[2].getParentMonoMass();
int spectraSize = min(spectrum.size(), cutPeaksMonoMass.size());
vector<double> parentMasses;
for (int i = 0; i < spectraSize ; i++){
parentMasses.push_back(spectrum[i].getParentMonoMass());
}
vector<vector<int>> index(spectraSize );
int filterAllSize = 0;
for (int i = 0; i < spectraSize ; i++){
for (int j = 0; j < cutMasses.size(); j++){
if (fabs(parentMasses[i] - cutMasses[j][cutMasses[j].size() - 1]) < limit)
index[i].push_back(j);
}
filterAllSize += index[i].size();
}
out << "每个谱的平均蛋白质个数:" << filterAllSize / spectraSize << endl;
for (int i = 0; i<spectraSize ; i++){
for (int j = 0; j < index[i].size(); j++){
approxConvolution2(cutMasses[index[i][j]], cutPeaksMonoMass[i], 0.015);
}
}
Config config;
config.init_with_defaults();
vector<PTM> list = config.getAllPTMs();
vector<mass_t> ptm;
for (int j = 0; j < list.size(); j++){
ptm.push_back(list[j].delta);
}
thrust::device_vector<double> ddelta = ptm;
clock_t msfilter_end = clock();
out << "ms-filter time:" << msfilter_end - msfilter_start << "ms" << endl;
out<<"ms-filter time:" << (msfilter_end - msfilter_start)/60000 << "min" << endl;
//cout << "过滤出:"<<index.size() << endl;
}
*/
void testTime(vector<vector<mass_t>>& cutMasses, vector<vector<mass_t>>& cutPeaksMonoMass){
int proteinSize = cutMasses.size();
double e = 0.2;
clock_t begin_exact = clock();
for (int i = 0; i < proteinSize; i++){
exactConvolution(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_exact = clock();
cout << "exact time:" << end_exact - begin_exact << "ms" << endl;
clock_t begin_app = clock();
for (int i = 0; i < proteinSize; i++){
approxConvolution(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_app = clock();
cout << "approx time:" << end_app - begin_app<< "ms" << endl;
clock_t begin_app2 = clock();
for (int i = 0; i < proteinSize; i++){
approxConvolution2(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_app2 = clock();
cout << "approx2 time:" << end_app2 - begin_app2<< "ms" << endl;
clock_t begin_ms = clock();
for (int i = 0; i < proteinSize; i++){
msFilter(cutMasses[i], cutPeaksMonoMass[5],e);
}
clock_t end_ms = clock();
cout << "ms-filter time:" << end_ms - begin_ms<< "ms" << endl;
}
/*
void gpuFilter(vector<vector<mass_t>>& cutMasses,vector<vector<mass_t>>& cutPeaksMonoMass){
thrust::host_vector<double> host_cutPeaks0;
for (int i = 0; i < cutPeaksMonoMass[0].size(); i++){
host_cutPeaks0.push_back(cutPeaksMonoMass[0][i]);
}
thrust::host_vector<int> cutMass_begin;
thrust::host_vector<int> cutMass_end;
thrust::host_vector<double> cutMassesTemp;
thrust::device_vector<double> device_cutPeaks0 = host_cutPeaks0;
int index = 0;
for (int i = 0; i < cutMasses.size(); i++){
cutMass_begin.push_back(index);
for (int j = 0; j < cutMasses[i].size(); j++){
cutMassesTemp.push_back(cutMasses[i][j]);
}
index += cutMasses[i].size() - 1;
cutMass_end.push_back(index);
index++;
}
thrust::device_vector<int> device_cutMass_begin(cutMass_begin.size());
thrust::device_vector<int> device_cutMass_end(cutMass_end.size());
thrust::device_vector<double> device_cutMasses(cutMassesTemp.size());
device_cutMass_begin = cutMass_begin;
device_cutMass_end = cutMass_end;
device_cutMasses = cutMassesTemp;
char *path = "output\\time_result.txt";
ofstream out(path);
//cout << cutMassesTemp[cutMass_end[6]] << " " << host_cutPeaks0[host_cutPeaks0.size() - 1] << endl;
//cout << "5 test:"<<(cutMassesTemp[cutMass_end[6]]+ host_cutPeaks0[host_cutPeaks0.size()-1])/0.015<< endl;
out << "device_cutMass_begin.size() " << device_cutMass_begin.size() << endl;
out << "device_cutMass_end.size() " << device_cutMass_end.size() << endl;
out << "device_cutMasses.size() " << device_cutMasses.size() << endl;
out << "device_cutPeaks0.size() " << device_cutPeaks0.size() << endl;
clock_t dev_start = clock();
//cudaEvent_t device_start0, device_stop0;
//cudaEventCreate(&device_start0);
//cudaEventCreate(&device_stop0);
//cudaEventRecord(device_start0, NULL);
//cout << "here" << endl;
//testt << <1, 1 >> >();
//test protein filter GPU
//thrust::device_vector<double> device_cutMass2 = cutMasses[2];
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 100 * 1024*1024);
//gpuFilter << <1, 3 >> >();
//gpuFilter << <1, 1 >> >(thrust::raw_pointer_cast(&device_cutMass_begin[0]), thrust::raw_pointer_cast(&device_cutMass_end[0]), thrust::raw_pointer_cast(&device_cutMasses[0]), thrust::raw_pointer_cast(&device_cutPeaks0[0]), device_cutPeaks0.size(),proteins.size());
//gpuApproxConvolution<<<1,1>>>(thrust::raw_pointer_cast(&device_cutMass_begin[0]), thrust::raw_pointer_cast(&device_cutMass_end[0]), thrust::raw_pointer_cast(&device_cutMasses[0]), thrust::raw_pointer_cast(&device_cutPeaks0[0]), device_cutPeaks0.size(),proteins.size());
//cudaSetDeviceFlags(cudaDeviceMapHost);
//const int fi = 1;
//cudaStream_t stream[fi];
//for (int i = 0; i < fi; i++){
// cudaStreamCreate(&stream[i]);
for (int j = 0; j < 10; j++){
int testIndex = j;
int ma_size = cutMasses[testIndex].size();
int mb_size = cutPeaksMonoMass[0].size();
thrust::device_vector<double> cutMassTest(ma_size);
cutMassTest = cutMasses[testIndex];
thrust::device_vector<double> cutPeakTest(mb_size);
cutPeakTest = cutPeaksMonoMass[0];
double ma = cutMasses[testIndex][ma_size - 1];
double mb = cutPeaksMonoMass[0][mb_size - 1];
int num = ma_size*mb_size;
int max = ((ma + mb) / 0.015) + 2;
int *c;
//CUDA_CALL(cudaHostAlloc((void**)&c, max*sizeof(int), cudaHostAllocWriteCombined | cudaHostAllocMapped));
cudaMalloc((void**)&c, sizeof(int)* max);
int *dev_score;
cudaMalloc((void**)&dev_score, sizeof(int)* 1);
//int *dev_c;
int score[1] = { 0 };
// int *hc=new int[max];
cudaMemcpy(dev_score, score, sizeof(int), cudaMemcpyHostToDevice);
//CUDA_CALL(cudaHostGetDevicePointer(&dev_c, c, 0));
//cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE,cudaMemcpyHostToDevice);
calScore << <num / 256+1, 256 >> >(thrust::raw_pointer_cast(&cutMassTest[0]), thrust::raw_pointer_cast(&cutPeakTest[0]), ma_size, mb_size, ma, mb, 0.015, c, num, dev_score, max);
//cudaThreadSynchronize();
cudaMemcpy(score, dev_score, sizeof(int), cudaMemcpyDeviceToHost);
//cudaMemcpy(hc, c, sizeof(int)*max, cudaMemcpyDeviceToHost);
//int *tempresult = max_element(hc, hc + max);
cout << "并行:" << score[0] << endl;
//cout << "标记:" << testIndex << endl;
cudaFreeHost(c);
cudaFreeHost(dev_score);
}
//}
//for (int i = 0; i < fi;i++)
// cudaStreamDestroy(stream[i]);
clock_t dev_end = clock();
//cudaEventRecord(device_stop0, NULL);
//cudaEventSynchronize(device_start0);
//cudaEventSynchronize(device_stop0);
//float msecTotal0 = 0.0f;
//cudaEventElapsedTime(&msecTotal0, device_start0, device_stop0);
out << " Time GPU protein filter:" << dev_end-dev_start << "ms"<<endl;
}
__global__ void msFiltergpu(int* cutMass_begin, int* cutMass_end, double* a,double* b,int m){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
//first convolution stage
double amino_acid_min = 75.06;
//clock_t c_start = clock();
int t = 8;
int index_start = cutMass_begin[idx];
int index_end = cutMass_end[idx];
int n = index_end-index_start+1;
double ma = a[index_end];
double mb = b[m - 1];
double ee = 1;
int maxee = (ma + mb) / ee;
thrust::device_vector<double> begin;
thrust::device_vector<double> end;
thrust::device_vector<double> diff;
//vector<int> index;
thrust::device_vector<int> tempd;
thrust::device_vector<int> cc(maxee+2, 0);
for (int i = index_start; i < index_end+1; i++){ //calculate the convolution array at resolution ee>e
for (int j = 0; j < m; j++){
int d = (b[j] - a[i]+ma) / ee;
cc[d] = cc[d] + 1;
if (cc[d]>t){
//find function
if (!findVal(thrust::raw_pointer_cast(&tempd[0]),d,tempd.size())){
//index.push_back(j*n + i);
tempd.push_back(d);
diff.push_back( b[j] - a[i]);
}
}
}
}
//cout << "1Da num:" << index.size() << endl;
//stage2
thrust::sort(diff.begin(), diff.end());
double e = 0.015;
for (int i = 0; i < diff.size(); i++){
if (i+1<diff.size()&&diff[i + 1] - diff[i]+2*ee < amino_acid_min){
begin.push_back(diff[i] - ee);
end.push_back(diff[i + 1] + ee);
i++;
}
else{
begin.push_back(diff[i] - ee);
end.push_back(diff[i] + ee);
}
}
t = 0;
int max = (ma + mb) / e;
thrust::device_vector<int> c(max+2, 0);
for (int k= 0; k < begin.size(); k++){
int i = 0, j = 0;
double first = begin[k];
double last = end[k];
//cout << diff[index[k]] << " ";
while (i < n&&j < m){
double d = b[j] - a[index_start+i];
if (d < first){
j++;
}
else{
i++;
if (d < last){
// cout << d << endl;
int indexd = (d + ma) / e;
c[indexd] = c[indexd] + 1;;
if (c[indexd]>t){
t = c[indexd];
}
indexd++;
c[indexd]=c[indexd]+1;
if (c[indexd] > t){
t = c[indexd];
}
}
}
}
}
printf("gpu msFilter: %d", t);
}
__global__ void testt(){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
printf("idx: %d", idx);
}
*/ |
40956cdd80013fa38f9e5419ef65469f2cd07bbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const double *r, double *du, double *u, double *u_sum,
double *u_max) {
*u += *du + alpha * (*r);
*du = 0.0f;
*u_sum += (*u) * (*u);
*u_max = maxfun(*u_max, *u);
}
// CUDA kernel function
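// Each thread strides over the set elements, accumulating thread-local partials
// for the two global reductions (an OP_INC sum and an OP_MAX maximum) that are
// then combined per block via op_reduction.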
__global__ void op_cuda_update(
const double *__restrict arg0,
double *arg1,
double *arg2,
double *arg3,
double *arg4,
int set_size ) {
double arg3_l[1];
for ( int d=0; d<1; d++ ){
arg3_l[d]=ZERO_double;
}
double arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=arg4[d+blockIdx.x*1];
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*1,
arg1+n*1,
arg2+n*1,
arg3_l,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]);
}
for ( int d=0; d<1; d++ ){
op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
double*arg3h = (double *)arg3.data;
double*arg4h = (double *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg3.data = OP_reduct_h + reduct_bytes;
arg3.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg3.data)[d+b*1] = ZERO_double;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg4.data)[d+b*1] = arg4h[d];
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0,
(double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d+b*1];
}
}
arg3.data = (char *)arg3h;
op_mpi_reduce(&arg3,arg3h);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = MAX(arg4h[d],((double *)arg4.data)[d+b*1]);
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += (float)set->size * arg0.size;
OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f;
}
| 40956cdd80013fa38f9e5419ef65469f2cd07bbb.cu | //
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const double *r, double *du, double *u, double *u_sum,
double *u_max) {
*u += *du + alpha * (*r);
*du = 0.0f;
*u_sum += (*u) * (*u);
*u_max = maxfun(*u_max, *u);
}
// CUDA kernel function
__global__ void op_cuda_update(
const double *__restrict arg0,
double *arg1,
double *arg2,
double *arg3,
double *arg4,
int set_size ) {
double arg3_l[1];
for ( int d=0; d<1; d++ ){
arg3_l[d]=ZERO_double;
}
double arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=arg4[d+blockIdx.x*1];
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*1,
arg1+n*1,
arg2+n*1,
arg3_l,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]);
}
for ( int d=0; d<1; d++ ){
op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
double*arg3h = (double *)arg3.data;
double*arg4h = (double *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg3.data = OP_reduct_h + reduct_bytes;
arg3.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg3.data)[d+b*1] = ZERO_double;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((double *)arg4.data)[d+b*1] = arg4h[d];
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
op_cuda_update<<<nblocks,nthread,nshared>>>(
(double *) arg0.data_d,
(double *) arg1.data_d,
(double *) arg2.data_d,
(double *) arg3.data_d,
(double *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg3h[d] = arg3h[d] + ((double *)arg3.data)[d+b*1];
}
}
arg3.data = (char *)arg3h;
op_mpi_reduce(&arg3,arg3h);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = MAX(arg4h[d],((double *)arg4.data)[d+b*1]);
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += (float)set->size * arg0.size;
OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f;
}
|
3ae0fcb438b750abefa08fa16520fd3e7efb233d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
extern "C" void initCuda(int argc, char **argv);
extern "C" void process(int pbo_in, int pbo_out, int width, int height, int radius);
extern "C" void pboRegister(int pbo);
extern "C" void pboUnregister(int pbo);
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b)<<16) | (int(g)<<8) | int(r);
}
// get pixel from 2D image, with clamping to border
__device__ int getPixel(int *data, int x, int y, int width, int height)
{
x = clamp(x, 0, width-1);
y = clamp(y, 0, height-1);
return data[y*width+x];
}
// macros to make indexing shared memory easier
#define SMEM(X, Y) sdata[(Y)*tilew+(X)]
/*
2D convolution using shared memory
- operates on 8-bit RGB data stored in 32-bit int
- assumes kernel radius is less than or equal to block size
- not optimized for performance
_____________
| : : |
|_ _:_____:_ _|
| | | |
| | | |
|_ _|_____|_ _|
r | : : |
|___:_____:___|
r bw r
<----tilew---->
*/
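// Note on shared-memory sizing (derived from the diagram above): the tile is
// tilew x (blockDim.y + 2*r) ints with tilew = blockDim.x + 2*r, so the dynamic
// shared-memory argument passed at launch (sbytes) must be at least
// (blockDim.x + 2*radius) * (blockDim.y + 2*radius) * sizeof(int).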
__global__ void
cudaProcess(int* g_data, int* g_odata, int imgw, int imgh, int tilew, int r, float threshold, float highlight)
{
extern __shared__ int sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
// copy tile to shared memory
// center region
SMEM(r + tx, r + ty) = getPixel(g_data, x, y, imgw, imgh);
// borders
if (threadIdx.x < r) {
// left
SMEM(tx, r + ty) = getPixel(g_data, x - r, y, imgw, imgh);
// right
SMEM(r + bw + tx, r + ty) = getPixel(g_data, x + bw, y, imgw, imgh);
}
if (threadIdx.y < r) {
// top
SMEM(r + tx, ty) = getPixel(g_data, x, y - r, imgw, imgh);
// bottom
SMEM(r + tx, r + bh + ty) = getPixel(g_data, x, y + bh, imgw, imgh);
}
// load corners
if ((threadIdx.x < r) && (threadIdx.y < r)) {
// tl
SMEM(tx, ty) = getPixel(g_data, x - r, y - r, imgw, imgh);
// bl
SMEM(tx, r + bh + ty) = getPixel(g_data, x - r, y + bh, imgw, imgh);
// tr
SMEM(r + bw + tx, ty) = getPixel(g_data, x + bw, y - r, imgw, imgh);
// br
SMEM(r + bw + tx, r + bh + ty) = getPixel(g_data, x + bw, y + bh, imgw, imgh);
}
// wait for loads to complete
__syncthreads();
// perform convolution
float rsum = 0.0;
float gsum = 0.0;
float bsum = 0.0;
float samples = 0.0;
for(int dy=-r; dy<=r; dy++) {
for(int dx=-r; dx<=r; dx++) {
#if 0
// try this to see the benefit of using shared memory
int pixel = getPixel(g_data, x+dx, y+dy, imgw, imgh);
#else
int pixel = SMEM(r+tx+dx, r+ty+dy);
#endif
// only sum pixels within disc-shaped kernel
float l = dx*dx + dy*dy;
if (l <= r*r) {
float r = float(pixel&0xff);
float g = float((pixel>>8)&0xff);
float b = float((pixel>>16)&0xff);
#if 1
// brighten highlights
float lum = (r + g + b) / (255*3);
if (lum > threshold) {
r *= highlight;
g *= highlight;
b *= highlight;
}
#endif
rsum += r;
gsum += g;
bsum += b;
samples += 1.0;
}
}
}
rsum /= samples;
gsum /= samples;
bsum /= samples;
g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum);
}
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes, int* g_data, int* g_odata,
int imgw, int imgh, int tilew,
int radius, float threshold, float highlight)
{
hipLaunchKernelGGL(( cudaProcess), dim3(grid), dim3(block), sbytes , 0, g_data, g_odata, imgw, imgh, block.x+(2*radius), radius, 0.8f, 4.0f);
}
| 3ae0fcb438b750abefa08fa16520fd3e7efb233d.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
extern "C" void initCuda(int argc, char **argv);
extern "C" void process(int pbo_in, int pbo_out, int width, int height, int radius);
extern "C" void pboRegister(int pbo);
extern "C" void pboUnregister(int pbo);
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b)<<16) | (int(g)<<8) | int(r);
}
// get pixel from 2D image, with clamping to border
__device__ int getPixel(int *data, int x, int y, int width, int height)
{
x = clamp(x, 0, width-1);
y = clamp(y, 0, height-1);
return data[y*width+x];
}
// macros to make indexing shared memory easier
#define SMEM(X, Y) sdata[(Y)*tilew+(X)]
/*
2D convolution using shared memory
- operates on 8-bit RGB data stored in 32-bit int
- assumes kernel radius is less than or equal to block size
- not optimized for performance
_____________
| : : |
|_ _:_____:_ _|
| | | |
| | | |
|_ _|_____|_ _|
r | : : |
|___:_____:___|
r bw r
<----tilew---->
*/
__global__ void
cudaProcess(int* g_data, int* g_odata, int imgw, int imgh, int tilew, int r, float threshold, float highlight)
{
extern __shared__ int sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
// copy tile to shared memory
// center region
SMEM(r + tx, r + ty) = getPixel(g_data, x, y, imgw, imgh);
// borders
if (threadIdx.x < r) {
// left
SMEM(tx, r + ty) = getPixel(g_data, x - r, y, imgw, imgh);
// right
SMEM(r + bw + tx, r + ty) = getPixel(g_data, x + bw, y, imgw, imgh);
}
if (threadIdx.y < r) {
// top
SMEM(r + tx, ty) = getPixel(g_data, x, y - r, imgw, imgh);
// bottom
SMEM(r + tx, r + bh + ty) = getPixel(g_data, x, y + bh, imgw, imgh);
}
// load corners
if ((threadIdx.x < r) && (threadIdx.y < r)) {
// tl
SMEM(tx, ty) = getPixel(g_data, x - r, y - r, imgw, imgh);
// bl
SMEM(tx, r + bh + ty) = getPixel(g_data, x - r, y + bh, imgw, imgh);
// tr
SMEM(r + bw + tx, ty) = getPixel(g_data, x + bw, y - r, imgw, imgh);
// br
SMEM(r + bw + tx, r + bh + ty) = getPixel(g_data, x + bw, y + bh, imgw, imgh);
}
// wait for loads to complete
__syncthreads();
// perform convolution
float rsum = 0.0;
float gsum = 0.0;
float bsum = 0.0;
float samples = 0.0;
for(int dy=-r; dy<=r; dy++) {
for(int dx=-r; dx<=r; dx++) {
#if 0
// try this to see the benefit of using shared memory
int pixel = getPixel(g_data, x+dx, y+dy, imgw, imgh);
#else
int pixel = SMEM(r+tx+dx, r+ty+dy);
#endif
// only sum pixels within disc-shaped kernel
float l = dx*dx + dy*dy;
if (l <= r*r) {
float r = float(pixel&0xff);
float g = float((pixel>>8)&0xff);
float b = float((pixel>>16)&0xff);
#if 1
// brighten highlights
float lum = (r + g + b) / (255*3);
if (lum > threshold) {
r *= highlight;
g *= highlight;
b *= highlight;
}
#endif
rsum += r;
gsum += g;
bsum += b;
samples += 1.0;
}
}
}
rsum /= samples;
gsum /= samples;
bsum /= samples;
g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum);
}
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes, int* g_data, int* g_odata,
int imgw, int imgh, int tilew,
int radius, float threshold, float highlight)
{
cudaProcess<<< grid, block, sbytes >>> (g_data, g_odata, imgw, imgh, block.x+(2*radius), radius, 0.8f, 4.0f);
}
|
4e0aaa801f10d357131b1cda11588fbf4d5a0ae2.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <random>
#include <tuple>
#include <vector>
#include <iostream>
#include <rnnt.h>
#include "test.h"
template<typename T>
void vector_to_gpu(T*& gpu_space, std::vector<T>& vec, hipStream_t& stream) {
hipMalloc(&gpu_space, vec.size() * sizeof(T));
hipMemcpyAsync(gpu_space, vec.data(), vec.size() * sizeof(T), hipMemcpyHostToDevice, stream);
}
bool small_test() {
const int B = 1;
const int alphabet_size = 5;
const int T = 2;
const int U = 3;
std::vector<float> acts = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1,
0.1, 0.6, 0.1, 0.1, 0.1, 0.1,
0.2, 0.8, 0.1, 0.1, 0.6, 0.1,
0.1, 0.1, 0.1, 0.1, 0.2, 0.1,
0.1, 0.7, 0.1, 0.2, 0.1, 0.1};
// std::vector<float> log_probs(acts.size());
// softmax(acts.data(), alphabet_size, B * T * U, log_probs.data(), true);
float expected_score = 4.495666;
std::vector<int> labels = {1, 2};
std::vector<int> label_lengths = {2};
std::vector<int> lengths;
lengths.push_back(T);
float score;
rnntOptions options{};
options.maxT = T;
options.maxU = U;
options.loc = RNNT_GPU;
options.blank_label = 0;
hipStream_t stream;
hipStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
int* label_gpu;
vector_to_gpu(label_gpu, labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, lengths, stream);
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, U, B,
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in small_test");
void* rnnt_gpu_workspace;
hipMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
NULL,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
lengths.size(),
&score,
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss in small_test");
hipFree(rnnt_gpu_workspace);
hipFree(acts_gpu);
hipFree(label_gpu);
hipFree(label_length_gpu);
hipFree(input_length_gpu);
const float eps = 1e-4;
const float lb = expected_score - eps;
const float ub = expected_score + eps;
return (score > lb && score < ub);
}
bool options_test() {
const int alphabet_size = 3;
const int T = 4;
const int L = 3;
const int minibatch = 2;
/*
std::vector<float> acts = {0.065357, 0.787530, 0.081592, 0.529716, 0.750675, 0.754135,
0.609764, 0.868140, 0.622532, 0.668522, 0.858039, 0.164539,
0.989780, 0.944298, 0.603168, 0.946783, 0.666203, 0.286882,
0.094184, 0.366674, 0.736168, 0.166680, 0.714154, 0.399400,
0.535982, 0.291821, 0.612642, 0.324241, 0.800764, 0.524106,
0.779195, 0.183314, 0.113745, 0.240222, 0.339470, 0.134160,
0.505562, 0.051597, 0.640290, 0.430733, 0.829473, 0.177467,
0.320700, 0.042883, 0.302803, 0.675178, 0.569537, 0.558474,
0.083132, 0.060165, 0.107958, 0.748615, 0.943918, 0.486356,
0.418199, 0.652408, 0.024243, 0.134582, 0.366342, 0.295830,
0.923670, 0.689929, 0.741898, 0.250005, 0.603430, 0.987289,
0.592606, 0.884672, 0.543450, 0.660770, 0.377128, 0.358021};
// std::vector<float> log_probs(acts.size());
// softmax(acts.data(), alphabet_size, minibatch * T * L, log_probs.data(), true);
std::vector<float> expected_grads = {-0.186844, -0.062555, 0.249399, -0.203377, 0.202399, 0.000977,
-0.141016, 0.079123, 0.061893, -0.011552, -0.081280, 0.092832,
-0.154257, 0.229433, -0.075176, -0.246593, 0.146405, 0.100188,
-0.012918, -0.061593, 0.074512, -0.055986, 0.219831, -0.163845,
-0.497627, 0.209240, 0.288387, 0.013605, -0.030220, 0.016615,
0.113925, 0.062781, -0.176706, -0.667078, 0.367659, 0.299419,
-0.356344, -0.055347, 0.411691, -0.096922, 0.029459, 0.067463,
-0.063518, 0.027654, 0.035863, -0.154499, -0.073942, 0.228441,
-0.166790, -0.000088, 0.166878, -0.172370, 0.105565, 0.066804,
0.023875, -0.118256, 0.094381, -0.104707, -0.108934, 0.213642,
-0.369844, 0.180118, 0.189726, 0.025714, -0.079462, 0.053748,
0.122328, -0.238789, 0.116460, -0.598687, 0.302203, 0.296484};
*/
std::vector<float> acts = { 0.065357, 0.787530, 0.081592, 0.505562, 0.051597, 0.640290,
0.529716, 0.750675, 0.754135, 0.430733, 0.829473, 0.177467,
0.609764, 0.868140, 0.622532, 0.320700, 0.042883, 0.302803,
0.668522, 0.858039, 0.164539, 0.675178, 0.569537, 0.558474,
0.989780, 0.944298, 0.603168, 0.083132, 0.060165, 0.107958,
0.946783, 0.666203, 0.286882, 0.748615, 0.943918, 0.486356,
0.094184, 0.366674, 0.736168, 0.418199, 0.652408, 0.024243,
0.166680, 0.714154, 0.399400, 0.134582, 0.366342, 0.295830,
0.535982, 0.291821, 0.612642, 0.923670, 0.689929, 0.741898,
0.324241, 0.800764, 0.524106, 0.250005, 0.603430, 0.987289,
0.779195, 0.183314, 0.113745, 0.592606, 0.884672, 0.543450,
0.240222, 0.339470, 0.134160, 0.660770, 0.377128, 0.358021};
std::vector<float> expected_grads = { -0.186844, -0.062555, 0.249399, -0.356344, -0.055347, 0.411691,
-0.203377, 0.202399, 0.000977, -0.096922, 0.029459, 0.067463,
-0.141016, 0.079123, 0.061893, -0.063518, 0.027654, 0.035863,
-0.011552, -0.081280, 0.092832, -0.154499, -0.073942, 0.228441,
-0.154257, 0.229433, -0.075176, -0.166790, -0.000088, 0.166878,
-0.246593, 0.146405, 0.100188, -0.172370, 0.105565, 0.066804,
-0.012918, -0.061593, 0.074512, 0.023875, -0.118256, 0.094381,
-0.055986, 0.219831, -0.163845, -0.104707, -0.108934, 0.213642,
-0.497627, 0.209240, 0.288387, -0.369844, 0.180118, 0.189726,
0.013605, -0.030220, 0.016615, 0.025714, -0.079462, 0.053748,
0.113925, 0.062781, -0.176706, 0.122328, -0.238789, 0.116460,
-0.667078, 0.367659, 0.299419, -0.598687, 0.302203, 0.296484};
// Calculate the expected scores analytically
std::vector<double> expected_scores(2);
expected_scores[0] = 4.2806528590890736;
expected_scores[1] = 3.9384369822503591;
std::vector<int> labels = {1, 2, 1, 1};
std::vector<int> label_lengths = {2, 2};
std::vector<int> lengths = {4, 4};
std::vector<float> grads(acts.size());
std::vector<float> scores(2);
rnntOptions options{};
options.maxT = T;
options.maxU = L;
options.loc = RNNT_GPU;
hipStream_t stream;
hipStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
options.batch_first = false;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
float* grads_gpu;
hipMalloc(&grads_gpu, grads.size() * sizeof(float));
int* label_gpu;
vector_to_gpu(label_gpu, labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, lengths, stream);
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, L, minibatch,
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in options_test");
void* rnnt_gpu_workspace;
hipMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
grads_gpu,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
lengths.size(),
scores.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss in small_test");
hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream);
hipFree(rnnt_gpu_workspace);
hipFree(acts_gpu);
hipFree(grads_gpu);
hipFree(label_gpu);
hipFree(label_length_gpu);
hipFree(input_length_gpu);
const double eps = 1e-4;
bool result = true;
// activations gradient check
for (int i = 0; i < grads.size(); i++) {
const double lb = expected_grads[i] - eps;
const double ub = expected_grads[i] + eps;
if (!(grads[i] > lb && grads[i] < ub)) {
std::cerr << "grad mismatch in options_test"
<< " expected grad: " << expected_grads[i]
<< " calculated score: " << grads[i]
<< " !(" << lb << " < " << grads[i]
<< " < " << ub << ")" << std::endl;
result = false;
}
}
for (int i = 0; i < 2; i++) {
const double lb = expected_scores[i] - eps;
const double ub = expected_scores[i] + eps;
if (!(scores[i] > lb && scores[i] < ub)) {
std::cerr << "score mismatch in options_test"
<< " expected score: " << expected_scores[i]
<< " calculated score: " << scores[i]
<< " !(" << lb << " < " << scores[i]
<< " < " << ub << ")" << std::endl;
result = false;
}
}
return result;
}
bool inf_test() {
const int alphabet_size = 15;
const int T = 50;
const int L = 10;
const int minibatch = 1;
std::vector<int> labels = genLabels(alphabet_size, L-1);
labels[0] = 2;
std::vector<int> label_lengths = {L-1};
std::vector<float> acts(alphabet_size * T * L * minibatch);
genActs(acts);
// std::vector<float> log_probs(acts.size());
// softmax(acts.data(), alphabet_size, minibatch * T * L, log_probs.data(), true);
std::vector<int> sizes;
sizes.push_back(T);
std::vector<float> grads(acts.size());
float cost;
rnntOptions options{};
options.maxT = T;
options.maxU = L;
options.loc = RNNT_GPU;
hipStream_t stream;
hipStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
options.batch_first = true;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
float* grads_gpu;
hipMalloc(&grads_gpu, grads.size() * sizeof(float));
int* label_gpu;
vector_to_gpu(label_gpu, labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, sizes, stream);
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, L, minibatch,
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in inf_test");
void* rnnt_gpu_workspace;
hipMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
grads_gpu,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
sizes.size(),
&cost,
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss in small_test");
hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream);
hipFree(rnnt_gpu_workspace);
hipFree(acts_gpu);
hipFree(grads_gpu);
hipFree(label_gpu);
hipFree(label_length_gpu);
hipFree(input_length_gpu);
bool status = true;
status &= !std::isinf(cost);
for (int i = 0; i < alphabet_size * L * T * minibatch; ++i)
status &= !std::isnan(grads[i]);
return status;
}
void numeric_grad(float* acts, int* flat_labels, int* label_lengths,
int* sizes, int alphabet_size, int minibatch,
void* rnnt_gpu_workspace, rnntOptions& options, std::vector<float>& num_grad) {
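    // Second-order central difference: nudge each activation on the device by
    // +epsilon and -epsilon, recompute the total loss for each case, and
    // estimate d(loss)/d(act[i]) ~= (cost(+eps) - cost(-eps)) / (2 * epsilon),
    // restoring the original activation afterwards.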
float epsilon = 1e-2;
float act;
for (int i = 0; i < num_grad.size(); ++i) {
std::vector<float> costsP1(minibatch);
std::vector<float> costsP2(minibatch);
hipMemcpy(&act, &acts[i], sizeof(float), hipMemcpyDeviceToHost);
act += epsilon;
hipMemcpy(&acts[i], &act, sizeof(float), hipMemcpyHostToDevice);
throw_on_error(compute_rnnt_loss(acts,
NULL,
flat_labels,
label_lengths,
sizes,
alphabet_size,
minibatch,
costsP1.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss (1) in grad_check");
hipMemcpy(&act, &acts[i], sizeof(float), hipMemcpyDeviceToHost);
act -= 2 * epsilon;
hipMemcpy(&acts[i], &act, sizeof(float), hipMemcpyHostToDevice);
throw_on_error(compute_rnnt_loss(acts,
NULL,
flat_labels,
label_lengths,
sizes,
alphabet_size,
minibatch,
costsP2.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss (2) in grad_check");
float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.);
float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.);
hipMemcpy(&act, &acts[i], sizeof(float), hipMemcpyDeviceToHost);
act += epsilon;
hipMemcpy(&acts[i], &act, sizeof(float), hipMemcpyHostToDevice);
num_grad[i] = (costP1 - costP2) / (2 * epsilon);
}
}
bool grad_check(int T, int L, int alphabet_size,
std::vector<float>& acts,
const std::vector<std::vector<int>>& labels,
std::vector<int>& sizes, float tol) {
const int minibatch = labels.size();
std::vector<int> flat_labels;
std::vector<int> label_lengths;
for (const auto& l : labels) {
flat_labels.insert(flat_labels.end(), l.begin(), l.end());
label_lengths.push_back(l.size());
}
std::vector<float> costs(minibatch);
std::vector<float> grads(acts.size());
rnntOptions options{};
options.maxT = T;
options.maxU = L;
options.loc = RNNT_GPU;
hipStream_t stream;
hipStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
float* grads_gpu;
hipMalloc(&grads_gpu, grads.size() * sizeof(float));
int* label_gpu;
vector_to_gpu(label_gpu, flat_labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, sizes, stream);
options.num_threads = 1;
options.batch_first = true;
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, L, sizes.size(),
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in grad_check");
void* rnnt_gpu_workspace;
hipMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
grads_gpu,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
sizes.size(),
costs.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss (0) in grad_check");
float cost = std::accumulate(costs.begin(), costs.end(), 0.);
hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream);
std::vector<float> num_grad(grads.size());
//perform 2nd order central differencing
numeric_grad(acts_gpu, label_gpu, label_length_gpu, input_length_gpu,
alphabet_size, minibatch, rnnt_gpu_workspace, options, num_grad);
hipFree(acts_gpu);
hipFree(rnnt_gpu_workspace);
hipFree(grads_gpu);
hipFree(label_gpu);
hipFree(label_length_gpu);
hipFree(input_length_gpu);
float diff = rel_diff(grads, num_grad);
return diff < tol;
}
bool run_tests() {
std::vector<std::tuple<int, int, int, int, float>> problem_sizes =
{std::make_tuple(20, 50, 15, 1, 1e-2),
std::make_tuple(5, 10, 5, 65, 1e-2)
};
std::mt19937 gen(2);
bool status = true;
for (auto problem : problem_sizes) {
int alphabet_size, T, L, minibatch;
float tol;
std::tie(alphabet_size, T, L, minibatch, tol) = problem;
std::vector<float> acts(alphabet_size * T * L * minibatch);
genActs(acts);
std::vector<float> log_probs(acts.size());
softmax(acts.data(), alphabet_size, minibatch * T * L, log_probs.data(), true);
std::vector<std::vector<int>> labels;
std::vector<int> sizes;
for (int mb = 0; mb < minibatch; ++mb) {
int actual_length = L - 1;
labels.push_back(genLabels(alphabet_size, actual_length));
sizes.push_back(T);
}
status &= grad_check(T, L, alphabet_size, acts, labels, sizes, tol);
}
return status;
}
int main(void) {
if (get_warprnnt_version() != 1) {
std::cerr << "Invalid Warp-transducer version." << std::endl;
return 1;
}
std::cout << "Running gpu tests" << std::endl;
bool status = true;
status &= small_test();
printf("finish small_test %d\n", status);
status &= options_test();
printf("finish options_test %d\n", status);
status &= inf_test();
printf("finish inf_test %d\n", status);
status &= run_tests();
printf("finished %d\n", status);
if (status) {
std::cout << "Tests pass" << std::endl;
return 0;
} else {
std::cout << "Some or all tests fail" << std::endl;
return 1;
}
}
| 4e0aaa801f10d357131b1cda11588fbf4d5a0ae2.cu | #include <cmath>
#include <random>
#include <tuple>
#include <vector>
#include <iostream>
#include <rnnt.h>
#include "test.h"
template<typename T>
void vector_to_gpu(T*& gpu_space, std::vector<T>& vec, cudaStream_t& stream) {
cudaMalloc(&gpu_space, vec.size() * sizeof(T));
cudaMemcpyAsync(gpu_space, vec.data(), vec.size() * sizeof(T), cudaMemcpyHostToDevice, stream);
}
bool small_test() {
const int B = 1;
const int alphabet_size = 5;
const int T = 2;
const int U = 3;
std::vector<float> acts = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1,
0.1, 0.6, 0.1, 0.1, 0.1, 0.1,
0.2, 0.8, 0.1, 0.1, 0.6, 0.1,
0.1, 0.1, 0.1, 0.1, 0.2, 0.1,
0.1, 0.7, 0.1, 0.2, 0.1, 0.1};
// std::vector<float> log_probs(acts.size());
// softmax(acts.data(), alphabet_size, B * T * U, log_probs.data(), true);
float expected_score = 4.495666;
std::vector<int> labels = {1, 2};
std::vector<int> label_lengths = {2};
std::vector<int> lengths;
lengths.push_back(T);
float score;
rnntOptions options{};
options.maxT = T;
options.maxU = U;
options.loc = RNNT_GPU;
options.blank_label = 0;
cudaStream_t stream;
cudaStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
int* label_gpu;
vector_to_gpu(label_gpu, labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, lengths, stream);
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, U, B,
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in small_test");
void* rnnt_gpu_workspace;
cudaMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
NULL,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
lengths.size(),
&score,
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss in small_test");
cudaFree(rnnt_gpu_workspace);
cudaFree(acts_gpu);
cudaFree(label_gpu);
cudaFree(label_length_gpu);
cudaFree(input_length_gpu);
const float eps = 1e-4;
const float lb = expected_score - eps;
const float ub = expected_score + eps;
return (score > lb && score < ub);
}
bool options_test() {
const int alphabet_size = 3;
const int T = 4;
const int L = 3;
const int minibatch = 2;
/*
std::vector<float> acts = {0.065357, 0.787530, 0.081592, 0.529716, 0.750675, 0.754135,
0.609764, 0.868140, 0.622532, 0.668522, 0.858039, 0.164539,
0.989780, 0.944298, 0.603168, 0.946783, 0.666203, 0.286882,
0.094184, 0.366674, 0.736168, 0.166680, 0.714154, 0.399400,
0.535982, 0.291821, 0.612642, 0.324241, 0.800764, 0.524106,
0.779195, 0.183314, 0.113745, 0.240222, 0.339470, 0.134160,
0.505562, 0.051597, 0.640290, 0.430733, 0.829473, 0.177467,
0.320700, 0.042883, 0.302803, 0.675178, 0.569537, 0.558474,
0.083132, 0.060165, 0.107958, 0.748615, 0.943918, 0.486356,
0.418199, 0.652408, 0.024243, 0.134582, 0.366342, 0.295830,
0.923670, 0.689929, 0.741898, 0.250005, 0.603430, 0.987289,
0.592606, 0.884672, 0.543450, 0.660770, 0.377128, 0.358021};
// std::vector<float> log_probs(acts.size());
// softmax(acts.data(), alphabet_size, minibatch * T * L, log_probs.data(), true);
std::vector<float> expected_grads = {-0.186844, -0.062555, 0.249399, -0.203377, 0.202399, 0.000977,
-0.141016, 0.079123, 0.061893, -0.011552, -0.081280, 0.092832,
-0.154257, 0.229433, -0.075176, -0.246593, 0.146405, 0.100188,
-0.012918, -0.061593, 0.074512, -0.055986, 0.219831, -0.163845,
-0.497627, 0.209240, 0.288387, 0.013605, -0.030220, 0.016615,
0.113925, 0.062781, -0.176706, -0.667078, 0.367659, 0.299419,
-0.356344, -0.055347, 0.411691, -0.096922, 0.029459, 0.067463,
-0.063518, 0.027654, 0.035863, -0.154499, -0.073942, 0.228441,
-0.166790, -0.000088, 0.166878, -0.172370, 0.105565, 0.066804,
0.023875, -0.118256, 0.094381, -0.104707, -0.108934, 0.213642,
-0.369844, 0.180118, 0.189726, 0.025714, -0.079462, 0.053748,
0.122328, -0.238789, 0.116460, -0.598687, 0.302203, 0.296484};
*/
std::vector<float> acts = { 0.065357, 0.787530, 0.081592, 0.505562, 0.051597, 0.640290,
0.529716, 0.750675, 0.754135, 0.430733, 0.829473, 0.177467,
0.609764, 0.868140, 0.622532, 0.320700, 0.042883, 0.302803,
0.668522, 0.858039, 0.164539, 0.675178, 0.569537, 0.558474,
0.989780, 0.944298, 0.603168, 0.083132, 0.060165, 0.107958,
0.946783, 0.666203, 0.286882, 0.748615, 0.943918, 0.486356,
0.094184, 0.366674, 0.736168, 0.418199, 0.652408, 0.024243,
0.166680, 0.714154, 0.399400, 0.134582, 0.366342, 0.295830,
0.535982, 0.291821, 0.612642, 0.923670, 0.689929, 0.741898,
0.324241, 0.800764, 0.524106, 0.250005, 0.603430, 0.987289,
0.779195, 0.183314, 0.113745, 0.592606, 0.884672, 0.543450,
0.240222, 0.339470, 0.134160, 0.660770, 0.377128, 0.358021};
std::vector<float> expected_grads = { -0.186844, -0.062555, 0.249399, -0.356344, -0.055347, 0.411691,
-0.203377, 0.202399, 0.000977, -0.096922, 0.029459, 0.067463,
-0.141016, 0.079123, 0.061893, -0.063518, 0.027654, 0.035863,
-0.011552, -0.081280, 0.092832, -0.154499, -0.073942, 0.228441,
-0.154257, 0.229433, -0.075176, -0.166790, -0.000088, 0.166878,
-0.246593, 0.146405, 0.100188, -0.172370, 0.105565, 0.066804,
-0.012918, -0.061593, 0.074512, 0.023875, -0.118256, 0.094381,
-0.055986, 0.219831, -0.163845, -0.104707, -0.108934, 0.213642,
-0.497627, 0.209240, 0.288387, -0.369844, 0.180118, 0.189726,
0.013605, -0.030220, 0.016615, 0.025714, -0.079462, 0.053748,
0.113925, 0.062781, -0.176706, 0.122328, -0.238789, 0.116460,
-0.667078, 0.367659, 0.299419, -0.598687, 0.302203, 0.296484};
// Calculate the expected scores analytically
std::vector<double> expected_scores(2);
expected_scores[0] = 4.2806528590890736;
expected_scores[1] = 3.9384369822503591;
std::vector<int> labels = {1, 2, 1, 1};
std::vector<int> label_lengths = {2, 2};
std::vector<int> lengths = {4, 4};
std::vector<float> grads(acts.size());
std::vector<float> scores(2);
rnntOptions options{};
options.maxT = T;
options.maxU = L;
options.loc = RNNT_GPU;
cudaStream_t stream;
cudaStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
options.batch_first = false;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
float* grads_gpu;
cudaMalloc(&grads_gpu, grads.size() * sizeof(float));
int* label_gpu;
vector_to_gpu(label_gpu, labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, lengths, stream);
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, L, minibatch,
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in options_test");
void* rnnt_gpu_workspace;
cudaMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
grads_gpu,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
lengths.size(),
scores.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss in small_test");
cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream);
cudaFree(rnnt_gpu_workspace);
cudaFree(acts_gpu);
cudaFree(grads_gpu);
cudaFree(label_gpu);
cudaFree(label_length_gpu);
cudaFree(input_length_gpu);
const double eps = 1e-4;
bool result = true;
// activations gradient check
for (int i = 0; i < grads.size(); i++) {
const double lb = expected_grads[i] - eps;
const double ub = expected_grads[i] + eps;
if (!(grads[i] > lb && grads[i] < ub)) {
std::cerr << "grad mismatch in options_test"
<< " expected grad: " << expected_grads[i]
<< " calculated score: " << grads[i]
<< " !(" << lb << " < " << grads[i]
<< " < " << ub << ")" << std::endl;
result = false;
}
}
for (int i = 0; i < 2; i++) {
const double lb = expected_scores[i] - eps;
const double ub = expected_scores[i] + eps;
if (!(scores[i] > lb && scores[i] < ub)) {
std::cerr << "score mismatch in options_test"
<< " expected score: " << expected_scores[i]
<< " calculated score: " << scores[i]
<< " !(" << lb << " < " << scores[i]
<< " < " << ub << ")" << std::endl;
result = false;
}
}
return result;
}
bool inf_test() {
const int alphabet_size = 15;
const int T = 50;
const int L = 10;
const int minibatch = 1;
std::vector<int> labels = genLabels(alphabet_size, L-1);
labels[0] = 2;
std::vector<int> label_lengths = {L-1};
std::vector<float> acts(alphabet_size * T * L * minibatch);
genActs(acts);
// std::vector<float> log_probs(acts.size());
// softmax(acts.data(), alphabet_size, minibatch * T * L, log_probs.data(), true);
std::vector<int> sizes;
sizes.push_back(T);
std::vector<float> grads(acts.size());
float cost;
rnntOptions options{};
options.maxT = T;
options.maxU = L;
options.loc = RNNT_GPU;
cudaStream_t stream;
cudaStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
options.batch_first = true;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
float* grads_gpu;
cudaMalloc(&grads_gpu, grads.size() * sizeof(float));
int* label_gpu;
vector_to_gpu(label_gpu, labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, sizes, stream);
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, L, minibatch,
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in inf_test");
void* rnnt_gpu_workspace;
cudaMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
grads_gpu,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
sizes.size(),
&cost,
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss in small_test");
cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream);
cudaFree(rnnt_gpu_workspace);
cudaFree(acts_gpu);
cudaFree(grads_gpu);
cudaFree(label_gpu);
cudaFree(label_length_gpu);
cudaFree(input_length_gpu);
bool status = true;
status &= !std::isinf(cost);
for (int i = 0; i < alphabet_size * L * T * minibatch; ++i)
status &= !std::isnan(grads[i]);
return status;
}
void numeric_grad(float* acts, int* flat_labels, int* label_lengths,
int* sizes, int alphabet_size, int minibatch,
void* rnnt_gpu_workspace, rnntOptions& options, std::vector<float>& num_grad) {
float epsilon = 1e-2;
float act;
for (int i = 0; i < num_grad.size(); ++i) {
std::vector<float> costsP1(minibatch);
std::vector<float> costsP2(minibatch);
cudaMemcpy(&act, &acts[i], sizeof(float), cudaMemcpyDeviceToHost);
act += epsilon;
cudaMemcpy(&acts[i], &act, sizeof(float), cudaMemcpyHostToDevice);
throw_on_error(compute_rnnt_loss(acts,
NULL,
flat_labels,
label_lengths,
sizes,
alphabet_size,
minibatch,
costsP1.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss (1) in grad_check");
cudaMemcpy(&act, &acts[i], sizeof(float), cudaMemcpyDeviceToHost);
act -= 2 * epsilon;
cudaMemcpy(&acts[i], &act, sizeof(float), cudaMemcpyHostToDevice);
throw_on_error(compute_rnnt_loss(acts,
NULL,
flat_labels,
label_lengths,
sizes,
alphabet_size,
minibatch,
costsP2.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss (2) in grad_check");
float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.);
float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.);
cudaMemcpy(&act, &acts[i], sizeof(float), cudaMemcpyDeviceToHost);
act += epsilon;
cudaMemcpy(&acts[i], &act, sizeof(float), cudaMemcpyHostToDevice);
num_grad[i] = (costP1 - costP2) / (2 * epsilon);
}
}
bool grad_check(int T, int L, int alphabet_size,
std::vector<float>& acts,
const std::vector<std::vector<int>>& labels,
std::vector<int>& sizes, float tol) {
const int minibatch = labels.size();
std::vector<int> flat_labels;
std::vector<int> label_lengths;
for (const auto& l : labels) {
flat_labels.insert(flat_labels.end(), l.begin(), l.end());
label_lengths.push_back(l.size());
}
std::vector<float> costs(minibatch);
std::vector<float> grads(acts.size());
rnntOptions options{};
options.maxT = T;
options.maxU = L;
options.loc = RNNT_GPU;
cudaStream_t stream;
cudaStreamCreate(&stream);
options.stream = stream;
options.num_threads = 1;
float* acts_gpu;
vector_to_gpu(acts_gpu, acts, stream);
float* grads_gpu;
cudaMalloc(&grads_gpu, grads.size() * sizeof(float));
int* label_gpu;
vector_to_gpu(label_gpu, flat_labels, stream);
int* label_length_gpu;
vector_to_gpu(label_length_gpu, label_lengths, stream);
int* input_length_gpu;
vector_to_gpu(input_length_gpu, sizes, stream);
options.num_threads = 1;
options.batch_first = true;
size_t gpu_alloc_bytes;
throw_on_error(get_rnnt_workspace_size(T, L, sizes.size(),
true,
&gpu_alloc_bytes),
"Error: get_rnnt_workspace_size in grad_check");
void* rnnt_gpu_workspace;
cudaMalloc(&rnnt_gpu_workspace, gpu_alloc_bytes);
throw_on_error(compute_rnnt_loss(acts_gpu,
grads_gpu,
label_gpu,
label_length_gpu,
input_length_gpu,
alphabet_size,
sizes.size(),
costs.data(),
rnnt_gpu_workspace,
options),
"Error: compute_rnnt_loss (0) in grad_check");
float cost = std::accumulate(costs.begin(), costs.end(), 0.);
cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream);
std::vector<float> num_grad(grads.size());
//perform 2nd order central differencing
numeric_grad(acts_gpu, label_gpu, label_length_gpu, input_length_gpu,
alphabet_size, minibatch, rnnt_gpu_workspace, options, num_grad);
cudaFree(acts_gpu);
cudaFree(rnnt_gpu_workspace);
cudaFree(grads_gpu);
cudaFree(label_gpu);
cudaFree(label_length_gpu);
cudaFree(input_length_gpu);
float diff = rel_diff(grads, num_grad);
return diff < tol;
}
bool run_tests() {
std::vector<std::tuple<int, int, int, int, float>> problem_sizes =
{std::make_tuple(20, 50, 15, 1, 1e-2),
std::make_tuple(5, 10, 5, 65, 1e-2)
};
std::mt19937 gen(2);
bool status = true;
for (auto problem : problem_sizes) {
int alphabet_size, T, L, minibatch;
float tol;
std::tie(alphabet_size, T, L, minibatch, tol) = problem;
std::vector<float> acts(alphabet_size * T * L * minibatch);
genActs(acts);
std::vector<float> log_probs(acts.size());
softmax(acts.data(), alphabet_size, minibatch * T * L, log_probs.data(), true);
std::vector<std::vector<int>> labels;
std::vector<int> sizes;
for (int mb = 0; mb < minibatch; ++mb) {
int actual_length = L - 1;
labels.push_back(genLabels(alphabet_size, actual_length));
sizes.push_back(T);
}
status &= grad_check(T, L, alphabet_size, acts, labels, sizes, tol);
}
return status;
}
int main(void) {
if (get_warprnnt_version() != 1) {
std::cerr << "Invalid Warp-transducer version." << std::endl;
return 1;
}
std::cout << "Running gpu tests" << std::endl;
bool status = true;
status &= small_test();
printf("finish small_test %d\n", status);
status &= options_test();
printf("finish options_test %d\n", status);
status &= inf_test();
printf("finish inf_test %d\n", status);
status &= run_tests();
printf("finished %d\n", status);
if (status) {
std::cout << "Tests pass" << std::endl;
return 0;
} else {
std::cout << "Some or all tests fail" << std::endl;
return 1;
}
}
|
886c5e8da14e6f327d5f600325ffa33392f73853.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl_diag.cu, normal z -> d, Mon Jun 25 18:24:12 2018
*/
#include "magma_internal.h"
#define MB 64
#define NB 160
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_diag_lower(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[j + j*ldd] );
}
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_diag_upper(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[ind_x + ind_x*ldd] );
}
}
}
/***************************************************************************//**
Purpose
-------
DLASCL_DIAG scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be upper triangular or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD DOUBLE PRECISION vector, dimension (LDDD,M)
The matrix storing the scaling factor on its diagonal.
@param[in]
lddd INTEGER
The leading dimension of the array D.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_diag
*******************************************************************************/
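/*
    A minimal usage sketch (an illustration, not part of MAGMA's documentation;
    it assumes dD and dA already live on the device with the stated leading
    dimensions, and that queue was created beforehand, e.g. with magma_queue_create):

        magma_int_t info;
        magmablas_dlascl_diag( MagmaLower, m, n, dD, lddd, dA, ldda, queue, &info );
        if (info != 0) {
            // an argument was illegal; info holds -(index of the bad argument)
        }
*/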
extern "C" void
magmablas_dlascl_diag(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD, magma_int_t lddd,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( lddd < max(1,m) )
*info = -5;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( MB );
dim3 grid( magma_ceildiv( m, MB ), magma_ceildiv( n, NB ) );
if (type == MagmaLower) {
hipLaunchKernelGGL(( dlascl_diag_lower)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dD, lddd, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( dlascl_diag_upper)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, dD, lddd, dA, ldda);
}
}
| 886c5e8da14e6f327d5f600325ffa33392f73853.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl_diag.cu, normal z -> d, Mon Jun 25 18:24:12 2018
*/
#include "magma_internal.h"
#define MB 64
#define NB 160
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
dlascl_diag_lower(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[j + j*ldd] );
}
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
dlascl_diag_upper(
int m, int n,
const double* D, int ldd,
double* A, int lda)
{
int ind_x = blockIdx.x * MB + threadIdx.x;
int ind_y = blockIdx.y * NB;
A += ind_x;
if (ind_x < m) {
for (int j=ind_y; j < min(ind_y+NB, n); j++ ) {
A[j*lda] = MAGMA_D_DIV( A[j*lda], D[ind_x + ind_x*ldd] );
}
}
}
/***************************************************************************//**
Purpose
-------
DLASCL_DIAG scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be upper triangular or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD DOUBLE PRECISION vector, dimension (LDDD,M)
The matrix storing the scaling factor on its diagonal.
@param[in]
lddd INTEGER
The leading dimension of the array D.
@param[in,out]
dA DOUBLE PRECISION array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_dlascl_diag(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD, magma_int_t lddd,
magmaDouble_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( lddd < max(1,m) )
*info = -5;
else if ( ldda < max(1,m) )
*info = -7;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( MB );
dim3 grid( magma_ceildiv( m, MB ), magma_ceildiv( n, NB ) );
if (type == MagmaLower) {
dlascl_diag_lower
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, dD, lddd, dA, ldda);
}
else if (type == MagmaUpper) {
dlascl_diag_upper
<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, dD, lddd, dA, ldda);
}
}
|
7666a7cbbbbfa9169d0a40c3a715f18a6d0b6eb6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define threads 32
#define size 5
using namespace std;
__global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
__shared__ int s_a[size * size], s_b[size * size], s_res[size * size], s_p, s_k;
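    // Note: the static shared arrays hold size*size elements, so the indexing
    // tid = tidx * n + tidy assumes n <= size. All threads store the same value
    // into s_k and s_p (a redundant but harmless write here), and no
    // __syncthreads() is required because each thread only reads the shared
    // slots it wrote itself.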
s_k = k;
s_p = p;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = s_a[tid] - s_b[tid];
if (s_res[tid] < s_k) {
s_res[tid] = s_p;
}
res[tid] = s_res[tid];
} | 7666a7cbbbbfa9169d0a40c3a715f18a6d0b6eb6.cu | #include "includes.h"
#define threads 32
#define size 5
using namespace std;
__global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n)
{
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
__shared__ int s_a[size * size], s_b[size * size], s_res[size * size], s_p, s_k;
s_k = k;
s_p = p;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = s_a[tid] - s_b[tid];
if (s_res[tid] < s_k) {
s_res[tid] = s_p;
}
res[tid] = s_res[tid];
} |
41182cb34d69bbad3408fab558f349731cc11102.hip | // !!! This is a file automatically generated by hipify!!!
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny, int disp_range ) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
/* functions code */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range)
{
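  // Matching cost: absolute intensity difference between the left pixel (i,j)
  // and the right pixel shifted by the candidate disparity d. Entries with
  // i < d have no valid correspondence and keep the 255 value set by the fill below.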
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
int find_min_index( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
void evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
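  // SGM path aggregation: for every disparity d,
  //   curr_cost[d] = local[d] + min_dp( prior[dp] + penalty(dp, d) ) - min_dp( prior[dp] )
  // where the penalty is 0 for dp == d, PENALTY1 for |dp - d| == 1, and otherwise
  // max(PENALTY1, PENALTY2 / path_intensity_gradient) (PENALTY2 when the gradient is 0).
  // Subtracting the minimum of the prior keeps the accumulated costs from growing
  // without bound along the path.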
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penality
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penality
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penality
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range)
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
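/*
 * NOTE: as written, sgmDevice() still runs the exact same CPU pipeline as
 * sgmHost(); the comments inside it are the author's notes sketching the
 * intended CUDA port (allocate device buffers, pick a kernel geometry, and
 * write the result into h_dispImD so the two PGM outputs can be compared).
 */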
// sgm code to run on the GPU
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
  // output image
  // kernel geometry
  // allocate the memory
  // reserve device memory for accumulated_costs: hipMalloc with the dimensions used above
  // the "h_" prefix means host => use a "d_" prefix for the device buffers
  // d_dispIm is the output
  // ll d_bull.pgm
  // ./testDiffs d_bull.pgm h_bull.pgm
  create_disparity_view( accumulated_costs, h_dispImD, nx, ny, disp_range ); // easy
free(accumulated_costs);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
hipEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// sgm at GPU
hipEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| 41182cb34d69bbad3408fab558f349731cc11102.cu |
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
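/*
 * Memory layout: the cost volumes are flat arrays of size nx*ny*disp_range
 * with the disparity index varying fastest, i.e.
 *   COSTS(i,j,d) -> costs[ j*nx*disp_range + i*disp_range + d ]
 * so the disp_range entries of one pixel are contiguous.  The images are
 * plain row-major nx*ny arrays (i = column, j = row).
 */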
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
/* functions code */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range)
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
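/*
 * The matching cost is the plain absolute difference of grey values between
 * the left pixel (i,j) and the right pixel shifted by the candidate
 * disparity, (i-d,j).  The inner loop starts at i = d so that i-d never
 * leaves the image; columns with i < d keep the 255 initialisation, which
 * acts as an "invalid / very expensive" sentinel.
 */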
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
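/*
 * The four iterate_direction_* variants differ only in traversal order:
 * each one walks every scanline in the given direction, seeds the first
 * pixel of the line with its raw matching cost, and then feeds
 * evaluate_path() with the costs already aggregated at the previous pixel
 * along the path plus the intensity difference |I(p) - I(p-r)| used to
 * scale PENALTY2.
 */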
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
int find_min_index( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
void evaluate_path(const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range)
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
        // No penalty
        e_smooth = MMIN(e_smooth,prior[d_p]);
      } else if ( abs(d_p - d) == 1 ) {
        // Small penalty
        e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
      } else {
        // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void create_disparity_view( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range)
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range)
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
  // output image
  // kernel geometry
  // allocate the memory
  // reserve device memory for accumulated_costs: cudaMalloc with the dimensions used above
  // the "h_" prefix means host => use a "d_" prefix for the device buffers
  // d_dispIm is the output
  // ll d_bull.pgm
  // ./testDiffs d_bull.pgm h_bull.pgm
  create_disparity_view( accumulated_costs, h_dispImD, nx, ny, disp_range ); // easy
free(accumulated_costs);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
cudaEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// sgm at GPU
cudaEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
2eaa026393cce6d095901d80b2459819f3990578.hip | // !!! This is a file automatically generated by hipify!!!
#include <dynamics/dynamics_stream_managed.cuh>
template<class CLASS_T, class PARAMS_T, int S_DIM, int C_DIM>
void GATE_internal::Dynamics<CLASS_T, PARAMS_T, S_DIM, C_DIM>::paramsToDevice() {
if (GPUMemStatus_) {
HANDLE_ERROR(hipMemcpyAsync(&model_d_->params_, ¶ms_,
sizeof(PARAMS_T), hipMemcpyHostToDevice,
stream_));
HANDLE_ERROR(hipMemcpyAsync(&model_d_->control_rngs_,
&control_rngs_,
C_DIM * sizeof(float2), hipMemcpyHostToDevice,
stream_));
HANDLE_ERROR(hipStreamSynchronize(stream_));
}
}
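/*
 * Both copies above are issued asynchronously on the object's stream, but
 * they read from host members of *this (params_, control_rngs_); the
 * trailing hipStreamSynchronize() therefore makes the call effectively
 * blocking, so the caller may modify those members as soon as
 * paramsToDevice() returns.
 */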
template<class CLASS_T, class PARAMS_T, int S_DIM, int C_DIM>
void GATE_internal::Dynamics<CLASS_T, PARAMS_T, S_DIM, C_DIM>::GPUSetup() {
CLASS_T* derived = static_cast<CLASS_T*>(this);
if (!GPUMemStatus_) {
model_d_ = Managed::GPUSetup(derived);
}
else {
std::cout << "GPU Memory already set" << std::endl;
}
derived->paramsToDevice();
}
template<class CLASS_T, class PARAMS_T, int S_DIM, int C_DIM>
void GATE_internal::Dynamics<CLASS_T, PARAMS_T, S_DIM, C_DIM>::freeCudaMem() {
if (GPUMemStatus_) {
hipFree(model_d_);
GPUMemStatus_ = false;
model_d_ = nullptr;
}
} | 2eaa026393cce6d095901d80b2459819f3990578.cu | #include <dynamics/dynamics_stream_managed.cuh>
template<class CLASS_T, class PARAMS_T, int S_DIM, int C_DIM>
void GATE_internal::Dynamics<CLASS_T, PARAMS_T, S_DIM, C_DIM>::paramsToDevice() {
if (GPUMemStatus_) {
HANDLE_ERROR(cudaMemcpyAsync(&model_d_->params_, ¶ms_,
sizeof(PARAMS_T), cudaMemcpyHostToDevice,
stream_));
HANDLE_ERROR(cudaMemcpyAsync(&model_d_->control_rngs_,
&control_rngs_,
C_DIM * sizeof(float2), cudaMemcpyHostToDevice,
stream_));
HANDLE_ERROR(cudaStreamSynchronize(stream_));
}
}
template<class CLASS_T, class PARAMS_T, int S_DIM, int C_DIM>
void GATE_internal::Dynamics<CLASS_T, PARAMS_T, S_DIM, C_DIM>::GPUSetup() {
CLASS_T* derived = static_cast<CLASS_T*>(this);
if (!GPUMemStatus_) {
model_d_ = Managed::GPUSetup(derived);
}
else {
std::cout << "GPU Memory already set" << std::endl;
}
derived->paramsToDevice();
}
template<class CLASS_T, class PARAMS_T, int S_DIM, int C_DIM>
void GATE_internal::Dynamics<CLASS_T, PARAMS_T, S_DIM, C_DIM>::freeCudaMem() {
if (GPUMemStatus_) {
cudaFree(model_d_);
GPUMemStatus_ = false;
model_d_ = nullptr;
}
} |
04134849fa0e3de4235534e11652d0d09762cf01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<unistd.h>
#include<sys/poll.h>
#include<sys/time.h>
#include<cuda.h>
#include<pthread.h>
#include<readline/readline.h>
#include<readline/history.h>
#include"tex.h"
#include"eqns.cu"
#include"vtk_writer_lib.h"
const char usageInfo[] =
"q: quit\n"
"h: help";
bool runSim = true;
pthread_t workerTh, controlTh;
tex_t tex1st;
tex_t tex2nd;
enum {UPDATE_PHI, UPDATE_C};
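/*
 * kernelStencil is launched twice per time step: first with UPDATE_PHI,
 * which writes the new phase field into `output`, then with UPDATE_C, which
 * reads the old (phi, c) pair from the texture and the freshly written phi
 * values from `output` to advance the concentration field.  Two device
 * buffers are bound to tex1st/tex2nd and the `sem` flag selects which one
 * is the read source while the other receives the update (ping-pong).
 * In the double-precision build each double is split into two ints of an
 * int4 texel with __double2loint/__double2hiint on write; the
 * PHI_FROM_TEXEL / C_FROM_TEXEL macros (defined in tex.h) are assumed to
 * reassemble them on read, since textures cannot fetch doubles directly.
 */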
template<int update_field>
__global__ void kernelStencil(dataType* output, bool sem){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
int north = row - 1;
int south = row + 1;
int east = col + 1;
int west = col - 1;
#if BCOND == PER
if(north < 0) north = GD_Y * BD_Y - 1;
if(south >= (GD_Y * BD_Y)) south = 0;
if(west < 0) west = GD_X * BD_X - 1;
if(east >= (GD_X * BD_X)) east = 0;
#else
if(north < 0) north = 0;
if(south >= (GD_Y * BD_Y)) south = (GD_Y * BD_Y) - 1;
if(west < 0) west = 0;
if(east >= (GD_X * BD_X)) east = (GD_X * BD_X) - 1;
#endif
if(update_field == UPDATE_PHI){
dataType tmpTexel;
    // nearest neighbors
tmpTexel = FETCH_TEX(sem, col, row);
cellType C_phi = PHI_FROM_TEXEL(tmpTexel);
cellType C_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, row);
cellType W_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, row);
cellType E_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, north);
cellType N_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, south);
cellType S_phi = PHI_FROM_TEXEL(tmpTexel);
#if ANISOTROPY == YES
tmpTexel = FETCH_TEX(sem, west, north);
cellType NW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, north);
cellType NE_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, south);
cellType SW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, south);
cellType SE_phi = PHI_FROM_TEXEL(tmpTexel);
#endif
cellType newPhi = C_phi + DT * phiDot(PHIDOT_UNTYPED_ARG_LIST);
#if FTYPE == FLOAT
output[row * GD_X * BD_X + col].x = newPhi;
#else
output[row * GD_X * BD_X + col].x = __double2loint(newPhi);
output[row * GD_X * BD_X + col].y = __double2hiint(newPhi);
#endif
}
//UPDATE_C
else{
dataType tmpTexel;
    // nearest neighbors
tmpTexel = FETCH_TEX(sem, col, row);
cellType C_phi = PHI_FROM_TEXEL(tmpTexel);
cellType C_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, row);
cellType W_phi = PHI_FROM_TEXEL(tmpTexel);
cellType W_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, row);
cellType E_phi = PHI_FROM_TEXEL(tmpTexel);
cellType E_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, north);
cellType N_phi = PHI_FROM_TEXEL(tmpTexel);
cellType N_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, south);
cellType S_phi = PHI_FROM_TEXEL(tmpTexel);
cellType S_c = C_FROM_TEXEL(tmpTexel);
    // next-nearest neighbors
tmpTexel = FETCH_TEX(sem, west, north);
cellType NW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, north);
cellType NE_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, south);
cellType SW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, south);
cellType SE_phi = PHI_FROM_TEXEL(tmpTexel);
cellType C_phi_new = PHI_FROM_TEXEL(output[row * GD_X * BD_X + col]);
cellType W_phi_new = PHI_FROM_TEXEL(output[row * GD_X * BD_X + west]);
cellType E_phi_new = PHI_FROM_TEXEL(output[row * GD_X * BD_X + east]);
cellType N_phi_new = PHI_FROM_TEXEL(output[north * GD_X * BD_X + col]);
cellType S_phi_new = PHI_FROM_TEXEL(output[south * GD_X * BD_X + col]);
cellType newC = C_c + DT * cDot(CDOT_UNTYPED_ARG_LIST);
#if FTYPE == FLOAT
output[row * GD_X * BD_X + col].y = newC;
#else
output[row * GD_X * BD_X + col].z = __double2loint(newC);
output[row * GD_X * BD_X + col].w = __double2hiint(newC);
#endif
}
}
//****************************************************************************************************
// several initial conditions
//****************************************************************************************************
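/*
 * writeInitialConditionSingleNucleus() seeds a circular nucleus at the
 * centre of the domain: within radius r0 phi follows a tanh profile, close
 * to +1 deep inside the nucleus and approaching -1 towards the rim, and c
 * is set consistently with phi through the partition coefficient KPART and
 * the supersaturation OMEGA.  writeInitialConditionHomogene() simply fills
 * both fields with constants and is used for the far field before the
 * nucleus is stamped in.
 */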
void writeInitialConditionSingleNucleus(float2* fields, int xDim, int yDim){
int cR = xDim / 2;
int cC = yDim / 2;
float r = 0.0;
float r0 = 2 * 10.0;
float tmp;
for(int row = 0; row < xDim; row++){
for(int col = 0; col < yDim; col++){
if((r=sqrt((row - cR) * (row - cR) + (col - cC) * (col - cC))) < r0){
tmp = -tanh((r-r0/2.0)/(M_SQRT2/0.4));
fields[col * xDim + row].x = tmp;
fields[col * xDim + row].y = (1.0-(1.0-KPART)*OMEGA)*((1.0+KPART)/2.0-(1.0-KPART)/2.0*tmp);
}
}
}
}
void writeInitialConditionHomogene(float2* output, int xDim, int yDim, float valField1, float valField2){
int elements = xDim * yDim;
for(int index = 0; index < elements; index++){
output[index].x = valField1;
output[index].y = valField2;
}
}
void writeInitialConditionSingleNucleus(int4* fields, int xDim, int yDim){
int cR = xDim / 2;
int cC = yDim / 2;
double r = 0.0;
double r0 = 2 * 10.0;
double2 tmp;
for(int row = 0; row < xDim; row++){
for(int col = 0; col < yDim; col++){
if((r=sqrt((row - cR) * (row - cR) + (col - cC) * (col - cC))) < r0){
tmp.x = -tanh((r-r0/2.0)/(M_SQRT2/0.4));
tmp.y = (1.0-(1.0-KPART)*OMEGA)*((1.0+KPART)/2.0-(1.0-KPART)/2.0*tmp.x);
*((double2*)fields + col * xDim + row) = tmp;
}
}
}
}
void writeInitialConditionHomogene(int4* output, int xDim, int yDim, double valField1, double valField2){
int elements = xDim * yDim;
double2 tmp;
tmp.x = valField1;
tmp.y = valField2;
for(int index = 0; index < elements; index++){
*((double2*)output + index) = tmp;
}
}
void cumulativeTimerStop(hipEvent_t startEvent, hipEvent_t stopEvent, float* cumTm){
float tm;
hipEventRecord(stopEvent);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tm, startEvent, stopEvent);
*cumTm += tm;
}
void cumulativeTimerStart(hipEvent_t startEvent){
hipEventRecord(startEvent);
}
void saveFields(float2* input, int xDim, int yDim, int fileCounter){
char fileName[STRINGLENGTH];
float* field = (float*)malloc(xDim * yDim * sizeof(float));
for(int index = 0; index < xDim * yDim; index++){
field[index] = input[index].x;
}
writeImageData(genFileName(fileName, "tex_phi_", fileCounter), xDim, yDim, field, false);
for(int index = 0; index < xDim * yDim; index++){
field[index] = input[index].y;
}
writeImageData(genFileName(fileName, "tex_c_", fileCounter), xDim, yDim, field, 0);
free(field);
}
void saveFields(int4* input, int xDim, int yDim, int fileCounter){
char fileName[STRINGLENGTH];
double2 tmp;
#ifdef SAVEFLOAT
float* field = (float*)malloc(xDim * yDim * sizeof(float));
#else
double* field = (double*)malloc(xDim * yDim * sizeof(double));
#endif
for(int index = 0; index < xDim * yDim; index++){
tmp = *((double2*)input + index);
field[index] = tmp.x;
}
writeImageData(genFileName(fileName, "tex_phi_", fileCounter), xDim, yDim, field, false);
for(int index = 0; index < xDim * yDim; index++){
tmp = *((double2*)input + index);
field[index] = tmp.y;
}
writeImageData(genFileName(fileName, "tex_c_", fileCounter), xDim, yDim, field, 0);
free(field);
}
char* genFileName(char* fileName, char* prefix, int fileCounter){
char fileCounterString[STRINGLENGTH];
memset(fileName, 0, STRINGLENGTH);
strcat(fileName, prefix);
sprintf(fileCounterString, "%.4d", fileCounter);
strcat(fileName, fileCounterString);
strcat(fileName, ".vti");
return fileName;
}
void commandHandler(char* command){
if(command){
if (strcmp(command,"h") == 0){
printf("%s\n",usageInfo);
}
if (strcmp(command,"q") == 0){
runSim = false;
}
printf("\n~ %s",command);
if (command[0]!=0)
add_history(command);
}
else{
runSim = false;
}
}
void* controlThread(void* ptr){
pollfd cinfd[1];
rl_callback_handler_install ("\n$>", commandHandler);
cinfd[0].fd = fileno(stdin);
cinfd[0].events = POLLIN;
rl_bind_key('\t',rl_abort);
while(runSim){
if(poll(cinfd, 1, 1)){
rl_callback_read_char();
}
}
rl_callback_handler_remove();
return 0;
}
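/*
 * Threading model: workerThread runs the time-stepping loop and (unless
 * TIMEIT is set) dumps VTK snapshots every WRITEEVERY iterations, while
 * controlThread polls stdin through readline so that typing "q" flips
 * runSim and lets both threads shut down cleanly.
 */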
void* workerThread(void* ptr){
int iterations = ITERS;
int fileCounter = 0;
struct timeval startTime, endTime;
simulationVariables_t<dataType> sVars;
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
float gpuElapsedTime;
//execution configuration
dim3 gridDims(GD_X, GD_Y);
dim3 blockDims(BD_X, BD_Y);
writeInitialConditionHomogene(sVars.hostFields, GD_X * BD_X, GD_Y * BD_Y,
F(-1.0),
F(1.0) - (F(1.0) - KPART) * OMEGA);
writeInitialConditionSingleNucleus(sVars.hostFields, GD_X * BD_X, GD_Y * BD_Y);
sVars.upload();
//setting up texture bindings
tex1st.filterMode = hipFilterModePoint;
tex1st.normalized = 0;
tex2nd.filterMode = hipFilterModePoint;
tex2nd.normalized = 0;
hipBindTexture2D(NULL, tex1st, sVars.deviceFields1st,
channelDescriptor,
GD_X * BD_X,
GD_Y * BD_Y,
GD_X * BD_X * sizeof(dataType));
hipBindTexture2D(NULL, tex2nd, sVars.deviceFields2nd,
channelDescriptor,
GD_X * BD_X,
GD_Y * BD_Y,
GD_X * BD_X * sizeof(dataType));
bool semaphore = 0;
// main loop
hipDeviceSynchronize();
printf("initialization done\n");
gettimeofday(&startTime, NULL);
while((iterations--) && runSim){
semaphore = !semaphore;
if(!TIMEIT){
printf("%d. iteration\n", fileCounter);
if((fileCounter % WRITEEVERY == 0) || iterations == 0){
hipMemcpy(sVars.hostFields, semaphore ? sVars.deviceFields1st : sVars.deviceFields2nd, sVars.fieldsNBytes, hipMemcpyDeviceToHost);
saveFields(sVars.hostFields, GD_X * BD_X, GD_Y * BD_Y, fileCounter);
}
}
cumulativeTimerStart(startEvent);
hipLaunchKernelGGL(( kernelStencil<UPDATE_PHI>), dim3(gridDims), dim3(blockDims), 0, 0, semaphore ? sVars.deviceFields2nd : sVars.deviceFields1st, semaphore);
hipLaunchKernelGGL(( kernelStencil<UPDATE_C>), dim3(gridDims), dim3(blockDims), 0, 0, semaphore ? sVars.deviceFields2nd : sVars.deviceFields1st, semaphore);
cumulativeTimerStop(startEvent, stopEvent, &gpuElapsedTime);
fileCounter++;
}
// timing
hipDeviceSynchronize();
gettimeofday(&endTime, NULL);
printf("GPU timer: %d ms\n", (int)gpuElapsedTime);
printf("CPU timer: %d ms\n",
(int)(((endTime.tv_sec - startTime.tv_sec) * 1000
+ (endTime.tv_usec - startTime.tv_usec)/1000.0)
+ 0.5));
// cleaning up
hipUnbindTexture(tex1st);
hipUnbindTexture(tex2nd);
runSim = false;
return 0;
}
int main(void){
pthread_create(&workerTh, NULL, workerThread, NULL);
pthread_create(&controlTh, NULL, controlThread, NULL);
pthread_join(workerTh, NULL);
pthread_join(controlTh, NULL);
return 0;
}
| 04134849fa0e3de4235534e11652d0d09762cf01.cu | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<unistd.h>
#include<sys/poll.h>
#include<sys/time.h>
#include<cuda.h>
#include<pthread.h>
#include<readline/readline.h>
#include<readline/history.h>
#include"tex.h"
#include"eqns.cu"
#include"vtk_writer_lib.h"
const char usageInfo[] =
"q: quit\n"
"h: help";
bool runSim = true;
pthread_t workerTh, controlTh;
tex_t tex1st;
tex_t tex2nd;
enum {UPDATE_PHI, UPDATE_C};
template<int update_field>
__global__ void kernelStencil(dataType* output, bool sem){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
int north = row - 1;
int south = row + 1;
int east = col + 1;
int west = col - 1;
#if BCOND == PER
if(north < 0) north = GD_Y * BD_Y - 1;
if(south >= (GD_Y * BD_Y)) south = 0;
if(west < 0) west = GD_X * BD_X - 1;
if(east >= (GD_X * BD_X)) east = 0;
#else
if(north < 0) north = 0;
if(south >= (GD_Y * BD_Y)) south = (GD_Y * BD_Y) - 1;
if(west < 0) west = 0;
if(east >= (GD_X * BD_X)) east = (GD_X * BD_X) - 1;
#endif
if(update_field == UPDATE_PHI){
dataType tmpTexel;
    // nearest neighbors
tmpTexel = FETCH_TEX(sem, col, row);
cellType C_phi = PHI_FROM_TEXEL(tmpTexel);
cellType C_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, row);
cellType W_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, row);
cellType E_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, north);
cellType N_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, south);
cellType S_phi = PHI_FROM_TEXEL(tmpTexel);
#if ANISOTROPY == YES
tmpTexel = FETCH_TEX(sem, west, north);
cellType NW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, north);
cellType NE_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, south);
cellType SW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, south);
cellType SE_phi = PHI_FROM_TEXEL(tmpTexel);
#endif
cellType newPhi = C_phi + DT * phiDot(PHIDOT_UNTYPED_ARG_LIST);
#if FTYPE == FLOAT
output[row * GD_X * BD_X + col].x = newPhi;
#else
output[row * GD_X * BD_X + col].x = __double2loint(newPhi);
output[row * GD_X * BD_X + col].y = __double2hiint(newPhi);
#endif
}
//UPDATE_C
else{
dataType tmpTexel;
    // nearest neighbors
tmpTexel = FETCH_TEX(sem, col, row);
cellType C_phi = PHI_FROM_TEXEL(tmpTexel);
cellType C_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, row);
cellType W_phi = PHI_FROM_TEXEL(tmpTexel);
cellType W_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, row);
cellType E_phi = PHI_FROM_TEXEL(tmpTexel);
cellType E_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, north);
cellType N_phi = PHI_FROM_TEXEL(tmpTexel);
cellType N_c = C_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, col, south);
cellType S_phi = PHI_FROM_TEXEL(tmpTexel);
cellType S_c = C_FROM_TEXEL(tmpTexel);
    // next-nearest neighbors
tmpTexel = FETCH_TEX(sem, west, north);
cellType NW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, north);
cellType NE_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, west, south);
cellType SW_phi = PHI_FROM_TEXEL(tmpTexel);
tmpTexel = FETCH_TEX(sem, east, south);
cellType SE_phi = PHI_FROM_TEXEL(tmpTexel);
cellType C_phi_new = PHI_FROM_TEXEL(output[row * GD_X * BD_X + col]);
cellType W_phi_new = PHI_FROM_TEXEL(output[row * GD_X * BD_X + west]);
cellType E_phi_new = PHI_FROM_TEXEL(output[row * GD_X * BD_X + east]);
cellType N_phi_new = PHI_FROM_TEXEL(output[north * GD_X * BD_X + col]);
cellType S_phi_new = PHI_FROM_TEXEL(output[south * GD_X * BD_X + col]);
cellType newC = C_c + DT * cDot(CDOT_UNTYPED_ARG_LIST);
#if FTYPE == FLOAT
output[row * GD_X * BD_X + col].y = newC;
#else
output[row * GD_X * BD_X + col].z = __double2loint(newC);
output[row * GD_X * BD_X + col].w = __double2hiint(newC);
#endif
}
}
//****************************************************************************************************
// several initial conditions
//****************************************************************************************************
void writeInitialConditionSingleNucleus(float2* fields, int xDim, int yDim){
int cR = xDim / 2;
int cC = yDim / 2;
float r = 0.0;
float r0 = 2 * 10.0;
float tmp;
for(int row = 0; row < xDim; row++){
for(int col = 0; col < yDim; col++){
if((r=sqrt((row - cR) * (row - cR) + (col - cC) * (col - cC))) < r0){
tmp = -tanh((r-r0/2.0)/(M_SQRT2/0.4));
fields[col * xDim + row].x = tmp;
fields[col * xDim + row].y = (1.0-(1.0-KPART)*OMEGA)*((1.0+KPART)/2.0-(1.0-KPART)/2.0*tmp);
}
}
}
}
void writeInitialConditionHomogene(float2* output, int xDim, int yDim, float valField1, float valField2){
int elements = xDim * yDim;
for(int index = 0; index < elements; index++){
output[index].x = valField1;
output[index].y = valField2;
}
}
void writeInitialConditionSingleNucleus(int4* fields, int xDim, int yDim){
int cR = xDim / 2;
int cC = yDim / 2;
double r = 0.0;
double r0 = 2 * 10.0;
double2 tmp;
for(int row = 0; row < xDim; row++){
for(int col = 0; col < yDim; col++){
if((r=sqrt((row - cR) * (row - cR) + (col - cC) * (col - cC))) < r0){
tmp.x = -tanh((r-r0/2.0)/(M_SQRT2/0.4));
tmp.y = (1.0-(1.0-KPART)*OMEGA)*((1.0+KPART)/2.0-(1.0-KPART)/2.0*tmp.x);
*((double2*)fields + col * xDim + row) = tmp;
}
}
}
}
void writeInitialConditionHomogene(int4* output, int xDim, int yDim, double valField1, double valField2){
int elements = xDim * yDim;
double2 tmp;
tmp.x = valField1;
tmp.y = valField2;
for(int index = 0; index < elements; index++){
*((double2*)output + index) = tmp;
}
}
void cumulativeTimerStop(cudaEvent_t startEvent, cudaEvent_t stopEvent, float* cumTm){
float tm;
cudaEventRecord(stopEvent);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tm, startEvent, stopEvent);
*cumTm += tm;
}
void cumulativeTimerStart(cudaEvent_t startEvent){
cudaEventRecord(startEvent);
}
void saveFields(float2* input, int xDim, int yDim, int fileCounter){
char fileName[STRINGLENGTH];
float* field = (float*)malloc(xDim * yDim * sizeof(float));
for(int index = 0; index < xDim * yDim; index++){
field[index] = input[index].x;
}
writeImageData(genFileName(fileName, "tex_phi_", fileCounter), xDim, yDim, field, false);
for(int index = 0; index < xDim * yDim; index++){
field[index] = input[index].y;
}
writeImageData(genFileName(fileName, "tex_c_", fileCounter), xDim, yDim, field, 0);
free(field);
}
void saveFields(int4* input, int xDim, int yDim, int fileCounter){
char fileName[STRINGLENGTH];
double2 tmp;
#ifdef SAVEFLOAT
float* field = (float*)malloc(xDim * yDim * sizeof(float));
#else
double* field = (double*)malloc(xDim * yDim * sizeof(double));
#endif
for(int index = 0; index < xDim * yDim; index++){
tmp = *((double2*)input + index);
field[index] = tmp.x;
}
writeImageData(genFileName(fileName, "tex_phi_", fileCounter), xDim, yDim, field, false);
for(int index = 0; index < xDim * yDim; index++){
tmp = *((double2*)input + index);
field[index] = tmp.y;
}
writeImageData(genFileName(fileName, "tex_c_", fileCounter), xDim, yDim, field, 0);
free(field);
}
char* genFileName(char* fileName, char* prefix, int fileCounter){
char fileCounterString[STRINGLENGTH];
memset(fileName, 0, STRINGLENGTH);
strcat(fileName, prefix);
sprintf(fileCounterString, "%.4d", fileCounter);
strcat(fileName, fileCounterString);
strcat(fileName, ".vti");
return fileName;
}
void commandHandler(char* command){
if(command){
if (strcmp(command,"h") == 0){
printf("%s\n",usageInfo);
}
if (strcmp(command,"q") == 0){
runSim = false;
}
printf("\n~ %s",command);
if (command[0]!=0)
add_history(command);
}
else{
runSim = false;
}
}
void* controlThread(void* ptr){
pollfd cinfd[1];
rl_callback_handler_install ("\n$>", commandHandler);
cinfd[0].fd = fileno(stdin);
cinfd[0].events = POLLIN;
rl_bind_key('\t',rl_abort);
while(runSim){
if(poll(cinfd, 1, 1)){
rl_callback_read_char();
}
}
rl_callback_handler_remove();
return 0;
}
void* workerThread(void* ptr){
int iterations = ITERS;
int fileCounter = 0;
struct timeval startTime, endTime;
simulationVariables_t<dataType> sVars;
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
float gpuElapsedTime;
//execution configuration
dim3 gridDims(GD_X, GD_Y);
dim3 blockDims(BD_X, BD_Y);
writeInitialConditionHomogene(sVars.hostFields, GD_X * BD_X, GD_Y * BD_Y,
F(-1.0),
F(1.0) - (F(1.0) - KPART) * OMEGA);
writeInitialConditionSingleNucleus(sVars.hostFields, GD_X * BD_X, GD_Y * BD_Y);
sVars.upload();
//setting up texture bindings
tex1st.filterMode = cudaFilterModePoint;
tex1st.normalized = 0;
tex2nd.filterMode = cudaFilterModePoint;
tex2nd.normalized = 0;
cudaBindTexture2D(NULL, tex1st, sVars.deviceFields1st,
channelDescriptor,
GD_X * BD_X,
GD_Y * BD_Y,
GD_X * BD_X * sizeof(dataType));
cudaBindTexture2D(NULL, tex2nd, sVars.deviceFields2nd,
channelDescriptor,
GD_X * BD_X,
GD_Y * BD_Y,
GD_X * BD_X * sizeof(dataType));
bool semaphore = 0;
// main loop
cudaThreadSynchronize();
printf("initialization done\n");
gettimeofday(&startTime, NULL);
while((iterations--) && runSim){
semaphore = !semaphore;
if(!TIMEIT){
printf("%d. iteration\n", fileCounter);
if((fileCounter % WRITEEVERY == 0) || iterations == 0){
cudaMemcpy(sVars.hostFields, semaphore ? sVars.deviceFields1st : sVars.deviceFields2nd, sVars.fieldsNBytes, cudaMemcpyDeviceToHost);
saveFields(sVars.hostFields, GD_X * BD_X, GD_Y * BD_Y, fileCounter);
}
}
cumulativeTimerStart(startEvent);
kernelStencil<UPDATE_PHI><<<gridDims, blockDims>>>(semaphore ? sVars.deviceFields2nd : sVars.deviceFields1st, semaphore);
kernelStencil<UPDATE_C><<<gridDims, blockDims>>>(semaphore ? sVars.deviceFields2nd : sVars.deviceFields1st, semaphore);
cumulativeTimerStop(startEvent, stopEvent, &gpuElapsedTime);
fileCounter++;
}
// timing
cudaThreadSynchronize();
gettimeofday(&endTime, NULL);
printf("GPU timer: %d ms\n", (int)gpuElapsedTime);
printf("CPU timer: %d ms\n",
(int)(((endTime.tv_sec - startTime.tv_sec) * 1000
+ (endTime.tv_usec - startTime.tv_usec)/1000.0)
+ 0.5));
// cleaning up
cudaUnbindTexture(tex1st);
cudaUnbindTexture(tex2nd);
runSim = false;
return 0;
}
int main(void){
pthread_create(&workerTh, NULL, workerThread, NULL);
pthread_create(&controlTh, NULL, controlThread, NULL);
pthread_join(workerTh, NULL);
pthread_join(controlTh, NULL);
return 0;
}
|
40b3f8c9e13ffacc1d0f1566badd6eb527a1a4eb.hip | // !!! This is a file automatically generated by hipify!!!
// Using CUDA device to calculate pi
#include <iostream>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <fstream>
#include <chrono>
#include "functions/Rastrigin.h"
#include "random/UniformRandom.h"
#include "random/UniformRandomInt.h"
#include "random/GaussianRandom.h"
#include "operators/mutations/GaussianMutator.h"
#include "operators/xover/LinearXOver.h"
#include "ga/Hipercube.h"
#include "selection/Tournament.h"
#include "ga/HAEA/AbstractHAEA.h"
using namespace std;
using ns = std::chrono::microseconds;
using get_time = std::chrono::steady_clock;
void test();
// Main routine that executes on the host
int main() {
Hipercube h(-5.12, 5.12, 10000);
thrust::device_vector<double> a = h.getRandomIndividual();
thrust::device_vector<double> b = h.getRandomIndividual();
vector<thrust::device_vector<double>> inds;
inds.push_back(a);
inds.push_back(b);
GaussianMutator gaussianMutator(0.0, 0.3, 0.1);
LinearXOver linearXOver;
auto start = get_time::now();
for(int i = 0; i < 100; ++i) {
h.repair(a);
}
auto end = get_time::now();
auto diff = end - start;
cout << std::chrono::duration_cast<ns>(diff).count() << " ";
//test();
return 0;
}
void test() {
size_t popSize[] = {100};
for(int i = 0; i < 1; ++i) {
std::cout << "population size: " << popSize[i] << std::endl;
std::ofstream file;
file.open(std::to_string(popSize[i]) + ".txt");
size_t ITERS = 1000;
size_t POP = popSize[i];
size_t DIM = 1000;
int sampling = 2;
Hipercube space(-5.12, 5.12, static_cast<int>(DIM));
Rastrigin optimizationFunction;
Tournament selection(optimizationFunction, 4);
std::shared_ptr<Operator<thrust::device_vector<double> > > lxo = std::make_shared<LinearXOver>(),
gm = std::make_shared<GaussianMutator>(0.0, 0.3, 0.1);
std::vector< std::shared_ptr<Operator<thrust::device_vector<double> > > > opers(2);
opers[0] = lxo;
opers[1] = gm;
AbstractHAEA<thrust::device_vector<double>> search(selection, opers, POP, ITERS);
for(int k = 0; k < sampling; ++k) {
std::cout << "iter: " << k << std::endl;
auto start = get_time::now();
search.solve(&space, &optimizationFunction);
auto end = get_time::now();
auto diff = end - start;
cout << std::chrono::duration_cast<ns>(diff).count() << " ";
file << std::chrono::duration_cast<ns>(diff).count() << " ";
/*thrust::host_vector<thrust::device_vector<double> > result = search.solve(&space, &optimizationFunction);
double mean = 0.0;
for(size_t i = 0; i < result.size(); ++i) {
for(size_t j = 0; j < result[0].size(); ++j) {
cout << result[i][j] << " ";
}
mean += optimizationFunction.apply(result[i]);
}
mean /= result.size();
rmean += mean;*/
}
file.close();
}
} | 40b3f8c9e13ffacc1d0f1566badd6eb527a1a4eb.cu | // Using CUDA device to calculate pi
#include <iostream>
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <fstream>
#include <chrono>
#include "functions/Rastrigin.h"
#include "random/UniformRandom.h"
#include "random/UniformRandomInt.h"
#include "random/GaussianRandom.h"
#include "operators/mutations/GaussianMutator.h"
#include "operators/xover/LinearXOver.h"
#include "ga/Hipercube.h"
#include "selection/Tournament.h"
#include "ga/HAEA/AbstractHAEA.h"
using namespace std;
using ns = std::chrono::microseconds;
using get_time = std::chrono::steady_clock;
void test();
// Main routine that executes on the host
int main() {
Hipercube h(-5.12, 5.12, 10000);
thrust::device_vector<double> a = h.getRandomIndividual();
thrust::device_vector<double> b = h.getRandomIndividual();
vector<thrust::device_vector<double>> inds;
inds.push_back(a);
inds.push_back(b);
GaussianMutator gaussianMutator(0.0, 0.3, 0.1);
LinearXOver linearXOver;
auto start = get_time::now();
for(int i = 0; i < 100; ++i) {
h.repair(a);
}
auto end = get_time::now();
auto diff = end - start;
cout << std::chrono::duration_cast<ns>(diff).count() << " ";
//test();
return 0;
}
void test() {
size_t popSize[] = {100};
for(int i = 0; i < 1; ++i) {
std::cout << "population size: " << popSize[i] << std::endl;
std::ofstream file;
file.open(std::to_string(popSize[i]) + ".txt");
size_t ITERS = 1000;
size_t POP = popSize[i];
size_t DIM = 1000;
int sampling = 2;
Hipercube space(-5.12, 5.12, static_cast<int>(DIM));
Rastrigin optimizationFunction;
Tournament selection(optimizationFunction, 4);
std::shared_ptr<Operator<thrust::device_vector<double> > > lxo = std::make_shared<LinearXOver>(),
gm = std::make_shared<GaussianMutator>(0.0, 0.3, 0.1);
std::vector< std::shared_ptr<Operator<thrust::device_vector<double> > > > opers(2);
opers[0] = lxo;
opers[1] = gm;
AbstractHAEA<thrust::device_vector<double>> search(selection, opers, POP, ITERS);
for(int k = 0; k < sampling; ++k) {
std::cout << "iter: " << k << std::endl;
auto start = get_time::now();
search.solve(&space, &optimizationFunction);
auto end = get_time::now();
auto diff = end - start;
cout << std::chrono::duration_cast<ns>(diff).count() << " ";
file << std::chrono::duration_cast<ns>(diff).count() << " ";
/*thrust::host_vector<thrust::device_vector<double> > result = search.solve(&space, &optimizationFunction);
double mean = 0.0;
for(size_t i = 0; i < result.size(); ++i) {
for(size_t j = 0; j < result[0].size(); ++j) {
cout << result[i][j] << " ";
}
mean += optimizationFunction.apply(result[i]);
}
mean /= result.size();
rmean += mean;*/
}
file.close();
}
} |
e7a0a85d0cdf1b0f2116a7ac462253877cd9de03.hip | // !!! This is a file automatically generated by hipify!!!
#include "model.h"
#include "../cuda_err.h"
void Model::loadToGPU(Model *gpu_model)
{
Model cpu_copy = *this;
cpu_copy.triangles = nullptr;
gpu_model = nullptr;
    // Allocate memory for the triangle array on the GPU and copy it over
gpuErrchk(hipMalloc(&(cpu_copy.triangles), num_of_triangles * sizeof(Triangle)));
gpuErrchk(hipMemcpy(cpu_copy.triangles, this->triangles,
num_of_triangles * sizeof(Triangle), hipMemcpyHostToDevice));
    // Allocate memory for the Model struct on the GPU and copy this patched host copy into it
gpuErrchk(hipMalloc(&gpu_model, sizeof(Model)));
gpuErrchk(hipMemcpy(gpu_model, &cpu_copy, sizeof(Model), hipMemcpyHostToDevice));
}
void Model::destroyGPU(Model *gpu_model)
{
if (gpu_model != nullptr) {
Model cpu_copy;
gpuErrchk(hipMemcpy(&cpu_copy, gpu_model, sizeof(Model), hipMemcpyDeviceToHost));
if (cpu_copy.triangles != nullptr) {
gpuErrchk(hipFree(cpu_copy.triangles));
}
gpuErrchk(hipFree(gpu_model));
}
}
| e7a0a85d0cdf1b0f2116a7ac462253877cd9de03.cu | #include "model.h"
#include "../cuda_err.h"
void Model::loadToGPU(Model *gpu_model)
{
Model cpu_copy = *this;
cpu_copy.triangles = nullptr;
gpu_model = nullptr;
    // Allocate memory for the triangle array on the GPU and copy it over
gpuErrchk(cudaMalloc(&(cpu_copy.triangles), num_of_triangles * sizeof(Triangle)));
gpuErrchk(cudaMemcpy(cpu_copy.triangles, this->triangles,
num_of_triangles * sizeof(Triangle), cudaMemcpyHostToDevice));
    // Allocate memory for the Model struct on the GPU and send a copy of it there
gpuErrchk(cudaMalloc(&gpu_model, sizeof(Model)));
gpuErrchk(cudaMemcpy(gpu_model, &cpu_copy, sizeof(Model), cudaMemcpyHostToDevice));
}
void Model::destroyGPU(Model *gpu_model)
{
if (gpu_model != nullptr) {
Model cpu_copy;
gpuErrchk(cudaMemcpy(&cpu_copy, gpu_model, sizeof(Model), cudaMemcpyDeviceToHost));
if (cpu_copy.triangles != nullptr) {
gpuErrchk(cudaFree(cpu_copy.triangles));
}
gpuErrchk(cudaFree(gpu_model));
}
}
|
1cb0507720039f832e0eba1187951e0589bc70a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <sys/time.h>
#include <queue>
#include "cuda_util.hpp"
using namespace std;
typedef float real_t;
struct n_t {
n_t ( int idx, float v ) : idx ( idx ), v ( v ) { }
bool operator < ( n_t const & o ) const { return v < o.v; }
int idx;
float v;
};
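/*
 * Top-t selection: every point keeps a std::priority_queue of candidate
 * neighbours.  Because the queue is a max-heap ordered by distance value,
 * pushing each candidate and popping whenever the size exceeds t leaves
 * exactly the t smallest values, with the current worst of them on top.
 * The distance matrix itself is assembled in two steps: C is seeded from
 * the per-row norm terms and a single hipblasSsyrk then accumulates
 * -2 * A * A^T into the upper triangle -- the cross term of the Euclidean
 * expansion ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b.
 */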
char const * TNN_type = "sTNN_cublas";
double run_TNN ( real_t * A, real_t * C, int n, int d, int t, int rep, int dev ) {
SAFE_CALL ( hipSetDevice ( dev ) );
hipblasHandle_t handle;
SAFE_CALL ( hipblasCreate ( &handle ) );
int lda = n;
int ldc = n;
real_t alpha = ( real_t ) -2.0;
real_t beta = ( real_t ) 1.0;
priority_queue < n_t > * pq = new priority_queue < n_t >[n];
real_t *norm;
SAFE_CALL( hipHostMalloc ( &norm, sizeof( real_t ) * n, hipHostMallocDefault ) );
struct timeval tb, te;
gettimeofday ( &tb, NULL );
for ( int i = 0; i < n; ++i )
SAFE_CALL( hipblasSnrm2( handle, n, A + i, n, norm + i ) );
  for ( int i = 0; i < n; ++i ) for( int j = 0; j < n; ++j )
C[ i * n + j] = norm[i] + norm[j];
for ( int r = 0; r < rep; ++r ) {
SAFE_CALL ( hipblasSsyrk (
handle,
HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N,
n, d,
&alpha,
A, lda,
&beta,
C, ldc
) );
SAFE_CALL ( hipDeviceSynchronize ( ) );
for ( int j = 0; j < n; ++j ) {
for ( int i = 0; i < j; ++i ) {
        float v = ( float ) C[j * n + i];
pq[i].push ( n_t ( j, v ) );
if ( pq[i].size ( ) > t ) { pq[i].pop ( ); }
pq[j].push ( n_t ( i, v ) );
if ( pq[j].size ( ) > t ) { pq[j].pop ( ); }
}
}
}
gettimeofday ( &te, NULL );
delete [] pq;
SAFE_CALL( hipHostFree( norm ) );
SAFE_CALL ( hipblasDestroy ( handle ) );
return te.tv_sec - tb.tv_sec + ( te.tv_usec - tb.tv_usec ) * 1E-6;
}
| 1cb0507720039f832e0eba1187951e0589bc70a3.cu | #include <cublas_v2.h>
#include <sys/time.h>
#include <queue>
#include "cuda_util.hpp"
using namespace std;
typedef float real_t;
struct n_t {
n_t ( int idx, float v ) : idx ( idx ), v ( v ) { }
bool operator < ( n_t const & o ) const { return v < o.v; }
int idx;
float v;
};
char const * TNN_type = "sTNN_cublas";
double run_TNN ( real_t * A, real_t * C, int n, int d, int t, int rep, int dev ) {
SAFE_CALL ( cudaSetDevice ( dev ) );
cublasHandle_t handle;
SAFE_CALL ( cublasCreate ( &handle ) );
int lda = n;
int ldc = n;
real_t alpha = ( real_t ) -2.0;
real_t beta = ( real_t ) 1.0;
priority_queue < n_t > * pq = new priority_queue < n_t >[n];
real_t *norm;
SAFE_CALL( cudaHostAlloc ( &norm, sizeof( real_t ) * n, cudaHostAllocDefault ) );
struct timeval tb, te;
gettimeofday ( &tb, NULL );
for ( int i = 0; i < n; ++i )
SAFE_CALL( cublasSnrm2( handle, n, A + i, n, norm + i ) );
  for ( int i = 0; i < n; ++i ) for( int j = 0; j < n; ++j )
C[ i * n + j] = norm[i] + norm[j];
for ( int r = 0; r < rep; ++r ) {
SAFE_CALL ( cublasSsyrk (
handle,
CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N,
n, d,
&alpha,
A, lda,
&beta,
C, ldc
) );
SAFE_CALL ( cudaDeviceSynchronize ( ) );
for ( int j = 0; j < n; ++j ) {
for ( int i = 0; i < j; ++i ) {
        float v = ( float ) C[j * n + i];
pq[i].push ( n_t ( j, v ) );
if ( pq[i].size ( ) > t ) { pq[i].pop ( ); }
pq[j].push ( n_t ( i, v ) );
if ( pq[j].size ( ) > t ) { pq[j].pop ( ); }
}
}
}
gettimeofday ( &te, NULL );
delete [] pq;
SAFE_CALL( cudaFreeHost( norm ) );
SAFE_CALL ( cublasDestroy ( handle ) );
return te.tv_sec - tb.tv_sec + ( te.tv_usec - tb.tv_usec ) * 1E-6;
}
|
192cefbc8f05b7feb18faea971909f55b279519f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pnpoly_cnGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
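/*
 * Auto-generated benchmark harness: for every matrix size in matrices_ and
 * every block shape in blocks_ it pads the problem size up to a multiple of
 * the block dimensions, performs one untimed launch plus a 10-launch
 * warm-up, then times 1000 kernel launches with std::chrono and prints
 * [elapsed_us, (BLOCKX,BLOCKY), (XSIZE,YSIZE)] per configuration.  Note
 * there is no device synchronization after the timed loop, so the figure
 * mostly reflects launch/queueing overhead unless the queue back-pressures.
 */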
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *cs = NULL;
hipMalloc(&cs, XSIZE*YSIZE);
const float *px = NULL;
hipMalloc(&px, XSIZE*YSIZE);
const float *py = NULL;
hipMalloc(&py, XSIZE*YSIZE);
const float *vx = NULL;
hipMalloc(&vx, XSIZE*YSIZE);
const float *vy = NULL;
hipMalloc(&vy, XSIZE*YSIZE);
int npoint = 1;
int nvert = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
pnpoly_cnGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, cs,px,py,vx,vy,npoint,nvert);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
pnpoly_cnGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, cs,px,py,vx,vy,npoint,nvert);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
pnpoly_cnGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, cs,px,py,vx,vy,npoint,nvert);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 192cefbc8f05b7feb18faea971909f55b279519f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pnpoly_cnGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *cs = NULL;
cudaMalloc(&cs, XSIZE*YSIZE);
const float *px = NULL;
cudaMalloc(&px, XSIZE*YSIZE);
const float *py = NULL;
cudaMalloc(&py, XSIZE*YSIZE);
const float *vx = NULL;
cudaMalloc(&vx, XSIZE*YSIZE);
const float *vy = NULL;
cudaMalloc(&vy, XSIZE*YSIZE);
int npoint = 1;
int nvert = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pnpoly_cnGPU<<<gridBlock,threadBlock>>>(cs,px,py,vx,vy,npoint,nvert);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pnpoly_cnGPU<<<gridBlock,threadBlock>>>(cs,px,py,vx,vy,npoint,nvert);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pnpoly_cnGPU<<<gridBlock,threadBlock>>>(cs,px,py,vx,vy,npoint,nvert);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1ace6d32485cc60f231ae3c4b41efa51774f3027.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
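/*
 * For reference, the standard closed-form prices that the CPU routine and the
 * kernel in BlackScholes_kernel.cuh are expected to implement (S = stock
 * price, X = strike, T = years to expiry, r = risk-free rate, v = volatility,
 * CND = cumulative normal distribution):
 *   d1   = (ln(S/X) + (r + v*v/2)*T) / (v*sqrt(T))
 *   d2   = d1 - v*sqrt(T)
 *   Call = S*CND(d1) - X*exp(-r*T)*CND(d2)
 *   Put  = X*exp(-r*T)*CND(-d2) - S*CND(-d1)
 */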
// Utilities and system includes
#include <shrUtils.h>
#include <shrQATest.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high){
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 4000000;
const int NUM_ITERATIONS = 1;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
shrQAStart(argc, argv);
// Start logs
shrSetLogFileName ("BlackScholes.txt");
shrLog("%s Starting...\n\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
unsigned int hTimer;
int i, devID;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("No CUDA Capable devices found, exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
}
} else {
hipSetDevice( devID = cutGetMaxGflopsDeviceId() );
}
cutilCheckError( cutCreateTimer(&hTimer) );
shrLog("Initializing data...\n");
shrLog("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
shrLog("...allocating GPU memory for options.\n");
cutilSafeCall( hipMalloc((void **)&d_CallResult, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_PutResult, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_StockPrice, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_OptionStrike, OPT_SZ) );
cutilSafeCall( hipMalloc((void **)&d_OptionYears, OPT_SZ) );
shrLog("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for(i = 0; i < OPT_N; i++){
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
shrLog("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
cutilSafeCall( hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice) );
shrLog("Data init done.\n\n");
shrLog("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
cutilSafeCall( cutilDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
for(i = 0; i < NUM_ITERATIONS; i++){
hipLaunchKernelGGL(( BlackScholesGPU), dim3(480), dim3(128), 0, 0,
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
cutilCheckMsg("BlackScholesGPU() execution failed\n");
}
cutilSafeCall( cutilDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
//Both call and put is calculated
shrLog("Options count : %i \n", 2 * OPT_N);
shrLog("BlackScholesGPU() time : %f msec\n", gpuTime);
shrLog("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
shrLog("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
shrLogEx(LOGBOTH | MASTER, 0, "BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
shrLog("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
cutilSafeCall( hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost) );
cutilSafeCall( hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost) );
shrLog("Checking the results...\n");
shrLog("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
shrLog("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for(i = 0; i < OPT_N; i++){
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if(delta > max_delta) max_delta = delta;
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
shrLog("L1 norm: %E\n", L1norm);
shrLog("Max absolute error: %E\n\n", max_delta);
shrLog("Shutting down...\n");
shrLog("...releasing GPU memory.\n");
cutilSafeCall( hipFree(d_OptionYears) );
cutilSafeCall( hipFree(d_OptionStrike) );
cutilSafeCall( hipFree(d_StockPrice) );
cutilSafeCall( hipFree(d_PutResult) );
cutilSafeCall( hipFree(d_CallResult) );
shrLog("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
cutilCheckError( cutDeleteTimer(hTimer) );
shrLog("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (L1norm < 1e-6) ? QA_PASSED : QA_FAILED);
}
| 1ace6d32485cc60f231ae3c4b41efa51774f3027.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
// Utilities and system includes
#include <shrUtils.h>
#include <shrQATest.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high){
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 4000000;
const int NUM_ITERATIONS = 1;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
shrQAStart(argc, argv);
// Start logs
shrSetLogFileName ("BlackScholes.txt");
shrLog("%s Starting...\n\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
unsigned int hTimer;
int i, devID;
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("No CUDA Capable devices found, exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
}
} else {
cudaSetDevice( devID = cutGetMaxGflopsDeviceId() );
}
cutilCheckError( cutCreateTimer(&hTimer) );
shrLog("Initializing data...\n");
shrLog("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
shrLog("...allocating GPU memory for options.\n");
cutilSafeCall( cudaMalloc((void **)&d_CallResult, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_PutResult, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_StockPrice, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_OptionStrike, OPT_SZ) );
cutilSafeCall( cudaMalloc((void **)&d_OptionYears, OPT_SZ) );
shrLog("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for(i = 0; i < OPT_N; i++){
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
shrLog("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
cutilSafeCall( cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice) );
shrLog("Data init done.\n\n");
shrLog("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
cutilSafeCall( cutilDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
for(i = 0; i < NUM_ITERATIONS; i++){
BlackScholesGPU<<<480, 128>>>(
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
cutilCheckMsg("BlackScholesGPU() execution failed\n");
}
cutilSafeCall( cutilDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
//Both call and put is calculated
shrLog("Options count : %i \n", 2 * OPT_N);
shrLog("BlackScholesGPU() time : %f msec\n", gpuTime);
shrLog("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
shrLog("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
shrLogEx(LOGBOTH | MASTER, 0, "BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
shrLog("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
cutilSafeCall( cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost) );
shrLog("Checking the results...\n");
shrLog("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
shrLog("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for(i = 0; i < OPT_N; i++){
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if(delta > max_delta) max_delta = delta;
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
shrLog("L1 norm: %E\n", L1norm);
shrLog("Max absolute error: %E\n\n", max_delta);
shrLog("Shutting down...\n");
shrLog("...releasing GPU memory.\n");
cutilSafeCall( cudaFree(d_OptionYears) );
cutilSafeCall( cudaFree(d_OptionStrike) );
cutilSafeCall( cudaFree(d_StockPrice) );
cutilSafeCall( cudaFree(d_PutResult) );
cutilSafeCall( cudaFree(d_CallResult) );
shrLog("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
cutilCheckError( cutDeleteTimer(hTimer) );
shrLog("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (L1norm < 1e-6) ? QA_PASSED : QA_FAILED);
}
|
18724ceb2c8d0b8a7f33494864db09c8d3d02b25.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_frac.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
REAL *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vector_frac), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vector_frac), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vector_frac), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 18724ceb2c8d0b8a7f33494864db09c8d3d02b25.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_frac.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const REAL *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
REAL *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vector_frac<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vector_frac<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vector_frac<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5e473f76c5bee871c8eb9f591501376c9e33e8dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "initpopulation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
chromosome *cudaChromo = NULL;
hipMalloc(&cudaChromo, XSIZE*YSIZE);
int seed = 2;
const int numofeles = 1;
int *devValue = NULL;
hipMalloc(&devValue, XSIZE*YSIZE);
int *devWeight = NULL;
hipMalloc(&devWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
initpopulation), dim3(gridBlock),dim3(threadBlock), 0, 0, cudaChromo,seed,numofeles,devValue,devWeight);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
initpopulation), dim3(gridBlock),dim3(threadBlock), 0, 0, cudaChromo,seed,numofeles,devValue,devWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
initpopulation), dim3(gridBlock),dim3(threadBlock), 0, 0, cudaChromo,seed,numofeles,devValue,devWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5e473f76c5bee871c8eb9f591501376c9e33e8dc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "initpopulation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
chromosome *cudaChromo = NULL;
cudaMalloc(&cudaChromo, XSIZE*YSIZE);
int seed = 2;
const int numofeles = 1;
int *devValue = NULL;
cudaMalloc(&devValue, XSIZE*YSIZE);
int *devWeight = NULL;
cudaMalloc(&devWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
initpopulation<<<gridBlock,threadBlock>>>(cudaChromo,seed,numofeles,devValue,devWeight);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
initpopulation<<<gridBlock,threadBlock>>>(cudaChromo,seed,numofeles,devValue,devWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
initpopulation<<<gridBlock,threadBlock>>>(cudaChromo,seed,numofeles,devValue,devWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
01de9e2862f08394edca454dfabc5d4fa8eedbc9.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <Common/helper_cuda.h>
//! Pull out matrix and shared memory tile size
const int N = 1 << 10;
const int SHMEM_SIZE = 1 << 10;
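//! SHMEM_SIZE = 1024 ints: each shared tile (s_a, s_b) holds one 32 x 32
//! sub-matrix, matching the THREADS = 32 block dimension configured in main()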
//! CUDA Kernel for matrix multiplication
//! __global__ means this is called from the CPU and runs on the GPU
__global__ void matrixMul(const int* __restrict a, const int* __restrict b, int* __restrict c)
{
//! Compute each thread's global row and column index
int tx = threadIdx.x, ty = threadIdx.y;
int col = blockDim.x * blockIdx.x + tx;
int row = blockDim.y * blockIdx.y + ty;
//! Statically allocated shared memory
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
//! Accumulate in temporary variable
int temp = 0;
//! Sweep tile across matrix
for (int i = 0; i < N / blockDim.x; ++i)
{
//! Load in elements for this tile
s_a[ty * blockDim.x + tx] = a[row * N + i * blockDim.x + tx];
        //! Load one element of the transposed B tile: its row index comes from this
        //! block's column offset, so that s_b[tx * blockDim.x + j] in the inner
        //! product below reads B_transposed[col][i * blockDim.x + j]
        s_b[ty * blockDim.x + tx] = b[(blockIdx.x * blockDim.x + ty) * N + i * blockDim.x + tx];
//! Wait for both tiles to be loaded in before doing computation
__syncthreads();
//! Do matrix multiplication on the small matrix
for (int j = 0; j < blockDim.x; ++j)
{
temp += s_a[ty * blockDim.x + j] * s_b[tx * blockDim.x + j];
}
//! Wait for all threads to finish using current tiles before loading in new ones
__syncthreads();
}
c[row * N + col] = temp;
}
void verify_result(const std::vector<int>& a, const std::vector<int>& b, const std::vector<int>& c, int N)
{
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
int temp = 0;
for (int k = 0; k < N; ++k)
{
temp += a[i * N + k] * b[k * N + j];
}
assert(temp == c[i * N + j]);
}
}
}
void transpose(const std::vector<int>& a, std::vector<int>& a_transposed, int N)
{
assert(a_transposed.size() == N * N && "a_transposed must be resized "
"as N^2 before passing this function");
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
a_transposed[j * N + i] = a[i * N + j];
}
}
}
int main()
{
//! matrix size of 2^20 elements
constexpr size_t bytes = sizeof(int) * N * N;
//! vectors for holding the host-side data
std::vector<int> a, b, c, b_transposed;
a.reserve(N * N);
b.reserve(N * N);
    //! c is filled through c.data() and indexed element-wise later, so it needs a
    //! real size, not just reserved capacity
    c.resize(N * N);
//! Initialize random numbers in each array
for (size_t i = 0; i < N * N; ++i)
{
a.push_back(rand() % 100);
b.push_back(rand() % 100);
}
//! Transpose the B matrix before launching kernel
b_transposed.resize(N * N);
transpose(b, b_transposed, N);
//! Allocate memory on the device
int* d_a, * d_b, * d_c;
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
//! Copy data from the host to the device(CPU -> GPU)
hipMemcpy(d_a, a.data(), bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, b_transposed.data(), bytes, hipMemcpyHostToDevice);
//! Threads per CTA
int THREADS = 32;
//! Blocks per grid dimension (assume THREADS divides N evenly)
int BLOCKS = N / THREADS;
//! Use dim3 structs for block and grid dimensions
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
//! Launch Kernel
matrixMul << <blocks, threads >> > (d_a, d_b, d_c);
//! Copy sum vector from device to host
//! hipMemcpy is a synchronous operation, and waits for the prior kernel
//! launch to complete(both go to the default stream in this case).
//! Therefore, this hipMemcpy acts as both a memcpy and synchronization
//! barrier
hipMemcpy(c.data(), d_c, bytes, hipMemcpyDeviceToHost);
//! Check result for errors
verify_result(a, b, c, N);
//! Free memory on device
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
std::cout << "COMPLETED SUCCESSFULLY" << std::endl;
return 0;
} | 01de9e2862f08394edca454dfabc5d4fa8eedbc9.cu | #include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <Common/helper_cuda.h>
//! Pull out matrix and shared memory tile size
const int N = 1 << 10;
const int SHMEM_SIZE = 1 << 10;
//! CUDA Kernel for matrix multiplication
//! __global__ means this is called from the CPU and runs on the GPU
__global__ void matrixMul(const int* __restrict a, const int* __restrict b, int* __restrict c)
{
//! Compute each thread's global row and column index
int tx = threadIdx.x, ty = threadIdx.y;
int col = blockDim.x * blockIdx.x + tx;
int row = blockDim.y * blockIdx.y + ty;
//! Statically allocated shared memory
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
//! Accumulate in temporary variable
int temp = 0;
//! Sweep tile across matrix
for (int i = 0; i < N / blockDim.x; ++i)
{
//! Load in elements for this tile
s_a[ty * blockDim.x + tx] = a[row * N + i * blockDim.x + tx];
        //! Load one element of the transposed B tile: its row index comes from this
        //! block's column offset, so that s_b[tx * blockDim.x + j] in the inner
        //! product below reads B_transposed[col][i * blockDim.x + j]
        s_b[ty * blockDim.x + tx] = b[(blockIdx.x * blockDim.x + ty) * N + i * blockDim.x + tx];
//! Wait for both tiles to be loaded in before doing computation
__syncthreads();
//! Do matrix multiplication on the small matrix
for (int j = 0; j < blockDim.x; ++j)
{
temp += s_a[ty * blockDim.x + j] * s_b[tx * blockDim.x + j];
}
//! Wait for all threads to finish using current tiles before loading in new ones
__syncthreads();
}
c[row * N + col] = temp;
}
void verify_result(const std::vector<int>& a, const std::vector<int>& b, const std::vector<int>& c, int N)
{
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
int temp = 0;
for (int k = 0; k < N; ++k)
{
temp += a[i * N + k] * b[k * N + j];
}
assert(temp == c[i * N + j]);
}
}
}
void transpose(const std::vector<int>& a, std::vector<int>& a_transposed, int N)
{
assert(a_transposed.size() == N * N && "a_transposed must be resized "
"as N^2 before passing this function");
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
a_transposed[j * N + i] = a[i * N + j];
}
}
}
int main()
{
//! matrix size of 2^20 elements
constexpr size_t bytes = sizeof(int) * N * N;
//! vectors for holding the host-side data
std::vector<int> a, b, c, b_transposed;
a.reserve(N * N);
b.reserve(N * N);
    //! c is filled through c.data() and indexed element-wise later, so it needs a
    //! real size, not just reserved capacity
    c.resize(N * N);
//! Initialize random numbers in each array
for (size_t i = 0; i < N * N; ++i)
{
a.push_back(rand() % 100);
b.push_back(rand() % 100);
}
//! Transpose the B matrix before launching kernel
b_transposed.resize(N * N);
transpose(b, b_transposed, N);
//! Allocate memory on the device
int* d_a, * d_b, * d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
//! Copy data from the host to the device(CPU -> GPU)
cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b_transposed.data(), bytes, cudaMemcpyHostToDevice);
//! Threads per CTA
int THREADS = 32;
//! Blocks per grid dimension (assume THREADS divides N evenly)
int BLOCKS = N / THREADS;
//! Use dim3 structs for block and grid dimensions
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
//! Launch Kernel
matrixMul << <blocks, threads >> > (d_a, d_b, d_c);
//! Copy sum vector from device to host
//! cudaMemcpy is a synchronous operation, and waits for the prior kernel
//! launch to complete(both go to the default stream in this case).
//! Therefore, this cudaMemcpy acts as both a memcpy and synchronization
//! barrier
cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
//! Check result for errors
verify_result(a, b, c, N);
//! Free memory on device
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
std::cout << "COMPLETED SUCCESSFULLY" << std::endl;
return 0;
} |
bb09a1d8964e2aac8fb7e2927e2bf780c29c7561.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CorrelateDataSubtract_1d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nthreads = 1;
int num = 1;
int item = 1;
int topwidth = XSIZE;
int topheight = YSIZE;
int topchannels = 1;
int topcount = 1;
int max_displacement = 1;
int x_shift = 1;
int neighborhood_grid_width = XSIZE;
int kernel_radius = 1;
int stride1 = 2;
int stride2 = 2;
int bottomwidth = XSIZE;
int bottomheight = YSIZE;
int bottomchannels = 1;
const float *bottom0 = NULL;
hipMalloc(&bottom0, XSIZE*YSIZE);
const float *bottom1 = NULL;
hipMalloc(&bottom1, XSIZE*YSIZE);
float *top = NULL;
hipMalloc(&top, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
CorrelateDataSubtract_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, nthreads,num,item,topwidth,topheight,topchannels,topcount,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,bottomchannels,bottom0,bottom1,top);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
CorrelateDataSubtract_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, nthreads,num,item,topwidth,topheight,topchannels,topcount,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,bottomchannels,bottom0,bottom1,top);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
CorrelateDataSubtract_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, nthreads,num,item,topwidth,topheight,topchannels,topcount,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,bottomchannels,bottom0,bottom1,top);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bb09a1d8964e2aac8fb7e2927e2bf780c29c7561.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CorrelateDataSubtract_1d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int nthreads = 1;
int num = 1;
int item = 1;
int topwidth = XSIZE;
int topheight = YSIZE;
int topchannels = 1;
int topcount = 1;
int max_displacement = 1;
int x_shift = 1;
int neighborhood_grid_width = XSIZE;
int kernel_radius = 1;
int stride1 = 2;
int stride2 = 2;
int bottomwidth = XSIZE;
int bottomheight = YSIZE;
int bottomchannels = 1;
const float *bottom0 = NULL;
cudaMalloc(&bottom0, XSIZE*YSIZE);
const float *bottom1 = NULL;
cudaMalloc(&bottom1, XSIZE*YSIZE);
float *top = NULL;
cudaMalloc(&top, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
CorrelateDataSubtract_1d<<<gridBlock,threadBlock>>>(nthreads,num,item,topwidth,topheight,topchannels,topcount,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,bottomchannels,bottom0,bottom1,top);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
CorrelateDataSubtract_1d<<<gridBlock,threadBlock>>>(nthreads,num,item,topwidth,topheight,topchannels,topcount,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,bottomchannels,bottom0,bottom1,top);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
CorrelateDataSubtract_1d<<<gridBlock,threadBlock>>>(nthreads,num,item,topwidth,topheight,topchannels,topcount,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,bottomchannels,bottom0,bottom1,top);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6744a76b2e1d7eacf86234d3f7d3be1da1a4afde.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
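//
// The overlap pattern used below, in outline:
//   hipEventRecord(start, 0);  // async H2D copy + kernel + async D2H copy on stream 0
//   hipEventRecord(stop, 0);
//   while (hipEventQuery(stop) == hipErrorNotReady) { /* CPU keeps counting */ }
//   hipEventElapsedTime(&gpu_time, start, stop);
//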
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
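// Trivial kernel: each thread adds inc_value to one element of g_data,
// giving the GPU measurable work for the timing/overlap demonstration.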
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
int correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return 0;
}
return 1;
}
int main(int argc, char *argv[])
{
int devID;
hipDeviceProp_t deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
int n = 16 * 1024 * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory
int *a = 0;
checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
checkCudaErrors(hipMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(hipDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipMemcpyAsync(d_a, a, nbytes, hipMemcpyHostToDevice, 0);
hipLaunchKernelGGL(( increment_kernel), dim3(blocks), dim3(threads), 0, 0, d_a, value);
hipMemcpyAsync(a, d_a, nbytes, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = (bool)correct_output(a, n, value);
// release resources
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipHostFree(a));
checkCudaErrors(hipFree(d_a));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 6744a76b2e1d7eacf86234d3f7d3be1da1a4afde.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
int correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return 0;
}
return 1;
}
int main(int argc, char *argv[])
{
int devID;
cudaDeviceProp deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
int n = 16 * 1024 * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory
int *a = 0;
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
checkCudaErrors(cudaMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = (bool)correct_output(a, n, value);
// release resources
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFreeHost(a));
checkCudaErrors(cudaFree(d_a));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
f301967cb6dbdfd0c0d4ae1e6f1f1315a1be5b23.hip | // !!! This is a file automatically generated by hipify!!!
//
// Program to solve Laplace equation on a regular 3D grid
//
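//
// Each sweep (GPU_laplace3d in laplace3d_kernel.h and the Gold_laplace3d
// reference) is expected to perform a Jacobi update, replacing every interior
// point by the average of its six neighbours,
//   u2[i,j,k] = ( u1[i-1,j,k] + u1[i+1,j,k] + u1[i,j-1,k]
//               + u1[i,j+1,k] + u1[i,j,k-1] + u1[i,j,k+1] ) / 6,
// while the Dirichlet boundary values set in main() are left unchanged.
//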
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 16
#define BLOCK_Y 8
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=256, NY=256, NZ=256, REPEAT=10,
bx, by, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( hipMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
hipEventRecord(start);
checkCudaErrors( hipMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyHostToDevice) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
dim3 dimGrid(bx,by);
dim3 dimBlock(BLOCK_X,BLOCK_Y);
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
hipEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
hipLaunchKernelGGL(( GPU_laplace3d), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
hipEventRecord(start);
checkCudaErrors( hipMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
hipEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( hipFree(d_u1) );
checkCudaErrors( hipFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
hipDeviceReset();
}
| f301967cb6dbdfd0c0d4ae1e6f1f1315a1be5b23.cu | //
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 16
#define BLOCK_Y 8
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=256, NY=256, NZ=256, REPEAT=10,
bx, by, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( cudaMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyHostToDevice) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
dim3 dimGrid(bx,by);
dim3 dimBlock(BLOCK_X,BLOCK_Y);
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
cudaEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
GPU_laplace3d<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
cudaEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( cudaFree(d_u1) );
checkCudaErrors( cudaFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
cudaDeviceReset();
}
|
bea7612f2bc2f41863a5b604f50f62b53c18cf5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
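// Back-substitution half of a two-sided (counter-sweep) Thomas algorithm for a
// tridiagonal system of size n = 6: the middle unknown x[p-1] is recovered from
// the coefficients of both elimination sweeps, then thread 0 substitutes
// upwards through rows p-2..0 while thread 1 substitutes downwards through
// rows p..n-1.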
__global__ void xKernel(double *alpha, double *beta, double * x)
{
int tid = threadIdx.x;
const int n = 6;
const int p = (int) (n / 2);
x[p - 1] = (beta[p - 1] + alpha[p - 1] * beta[p + 1]) / (1 - alpha[p + 1] * alpha[p - 1]);
if (tid == 0)
for (int i = p - 2; i >= 0; i--)
{
x[i] = alpha[i] * x[i + 1] + beta[i];
}
if (tid == 1)
for (int i = p; i < n; i++)
{
x[i] = alpha[i + 1] * x[i - 1] + beta[i + 1];
}
}
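// Elimination half: thread 0 runs the classic forward sweep from the first row
// and thread 1 the mirrored sweep from the last row, so the alpha/beta
// recurrence coefficients meet in the middle of the 6x7 augmented system
// (column a[i][n] holds the right-hand side).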
__global__ void alphaBetaKernel(double *alpha, double *beta)
{
int tid = threadIdx.x;
const int n = 6;
const int p = (int) (n / 2);
const double a[n][n + 1] = { { 10, 4, 0, 0, 0, 0, 1 },
{ 1, 10, 9, 0, 0, 0, 3 },
{ 0, 3, 10, 8, 0, 0, 5 },
{ 0, 0, 2, 10, 8, 0, 1 },
{ 0, 0, 0, 2, 10, 1, 6 },
{ 0, 0, 0, 0, 2, 10, 1 } };
alpha[0] = -a[0][1] / a[0][0];
beta[0] = a[0][n] / a[0][0];
alpha[n] = -a[n - 1][n - 2] / a[n - 1][n - 1];
beta[n] = a[n - 1][n] / a[n - 1][n - 1];
if (tid == 0)
for (int i = 0; i < p - 1; i++)
{
alpha[i + 1] = -a[i + 1][i + 2] / (a[i + 1][i] * alpha[i] + a[i + 1][i + 1]);
beta[i + 1] = (a[i + 1][n] - a[i + 1][i] * beta[i]) / (a[i + 1][i] * alpha[i] + a[i + 1][i + 1]);
}
if (tid == 1)
for (int i = n - 1; i > p - 1; i--)
{
alpha[i] = -a[i - 1][i - 2] / (a[i - 1][i] * alpha[i + 1] + a[i - 1][i - 1]);
beta[i] = (a[i - 1][n] - a[i - 1][i] * beta[i + 1]) / (a[i - 1][i] * alpha[i + 1] + a[i - 1][i - 1]);
}
}
int main()
{
const int n = 6;
double *alphaCuda = NULL;
double *betaCuda = NULL;
hipMalloc((void**) &alphaCuda, (n + 1) * sizeof(double));
hipMalloc((void**) &betaCuda, (n + 1) * sizeof(double));
hipLaunchKernelGGL(( alphaBetaKernel) , dim3(1), dim3(2) , 0, 0, alphaCuda, betaCuda);
double x[n];
double *xCuda = NULL;
hipMalloc((void**) &xCuda, n * sizeof(double));
xKernel << <1, 2 >> >(alphaCuda, betaCuda, xCuda);
hipMemcpy(&x, xCuda, n * sizeof(double), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
{
cout << x[i] << endl;
}
hipFree(alphaCuda);
hipFree(betaCuda);
hipFree(xCuda);
return 0;
}
| bea7612f2bc2f41863a5b604f50f62b53c18cf5a.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void xKernel(double *alpha, double *beta, double * x)
{
int tid = threadIdx.x;
const int n = 6;
const int p = (int) (n / 2);
x[p - 1] = (beta[p - 1] + alpha[p - 1] * beta[p + 1]) / (1 - alpha[p + 1] * alpha[p - 1]);
if (tid == 0)
for (int i = p - 2; i >= 0; i--)
{
x[i] = alpha[i] * x[i + 1] + beta[i];
}
if (tid == 1)
for (int i = p; i < n; i++)
{
x[i] = alpha[i + 1] * x[i - 1] + beta[i + 1];
}
}
__global__ void alphaBetaKernel(double *alpha, double *beta)
{
int tid = threadIdx.x;
const int n = 6;
const int p = (int) (n / 2);
const double a[n][n + 1] = { { 10, 4, 0, 0, 0, 0, 1 },
{ 1, 10, 9, 0, 0, 0, 3 },
{ 0, 3, 10, 8, 0, 0, 5 },
{ 0, 0, 2, 10, 8, 0, 1 },
{ 0, 0, 0, 2, 10, 1, 6 },
{ 0, 0, 0, 0, 2, 10, 1 } };
alpha[0] = -a[0][1] / a[0][0];
beta[0] = a[0][n] / a[0][0];
alpha[n] = -a[n - 1][n - 2] / a[n - 1][n - 1];
beta[n] = a[n - 1][n] / a[n - 1][n - 1];
if (tid == 0)
for (int i = 0; i < p - 1; i++)
{
alpha[i + 1] = -a[i + 1][i + 2] / (a[i + 1][i] * alpha[i] + a[i + 1][i + 1]);
beta[i + 1] = (a[i + 1][n] - a[i + 1][i] * beta[i]) / (a[i + 1][i] * alpha[i] + a[i + 1][i + 1]);
}
if (tid == 1)
for (int i = n - 1; i > p - 1; i--)
{
alpha[i] = -a[i - 1][i - 2] / (a[i - 1][i] * alpha[i + 1] + a[i - 1][i - 1]);
beta[i] = (a[i - 1][n] - a[i - 1][i] * beta[i + 1]) / (a[i - 1][i] * alpha[i + 1] + a[i - 1][i - 1]);
}
}
int main()
{
const int n = 6;
double *alphaCuda = NULL;
double *betaCuda = NULL;
cudaMalloc((void**) &alphaCuda, (n + 1) * sizeof(double));
cudaMalloc((void**) &betaCuda, (n + 1) * sizeof(double));
alphaBetaKernel <<<1, 2 >>>(alphaCuda, betaCuda);
double x[n];
double *xCuda = NULL;
cudaMalloc((void**) &xCuda, n * sizeof(double));
xKernel << <1, 2 >> >(alphaCuda, betaCuda, xCuda);
cudaMemcpy(&x, xCuda, n * sizeof(double), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
{
cout << x[i] << endl;
}
cudaFree(alphaCuda);
cudaFree(betaCuda);
cudaFree(xCuda);
return 0;
}
|
4d69492853dbbc763deb17eaa99582acba54c93c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
void SaveDeviceDataToFile(float* d_data, int data_size, char filename[])
{
float* data = (float *)malloc(sizeof(float)*data_size);
hipMemcpy(data,d_data,sizeof(float)*data_size,hipMemcpyDefault);
FILE *fp;
if ( (fp = fopen(filename,"wb")) == NULL )
{
printf("can not open file to write data %s\n",filename);
exit(0);
}
fwrite(data,sizeof(float)*data_size,1,fp);
fclose(fp);
printf("Data saved in file %s \n",filename);
}
double L2_norm_cpu(const float *a, const float *b, int SIZE)
{
double sum=0;
for (int i=0;i<SIZE;i++)
sum = sum + (a[i]-b[i])*(a[i]-b[i]);
//sum = sqrt(sum);
return sum;
}
double L2_norm_gpu(float *d_a, float *d_b) // h_b is pinned host memory
{
double norm_result = 0.0f;
dim3 dimBlock_sub(R/2);
dim3 dimGrid_sub(2,Z_prj,Nviews);
hipLaunchKernelGGL(( L2_norm_kernel), dim3(dimGrid_sub), dim3(dimBlock_sub), 0, 0, d_a, d_b);
/* CPU summation */
// float *norm_result_temp = (float *)malloc(sizeof(float)*R*Z_prj*Nviews);
// bzero(norm_result_temp, sizeof(float)*R*Z_prj*Nviews);
// hipMemcpy(norm_result_temp, d_a, d_proj_data_size, hipMemcpyDeviceToHost);
// for (int i=0;i<Z_prj*R*Nviews; i++)
// norm_result = norm_result + (double)norm_result_temp[i];
// free(norm_result_temp);
// cout<<" * L2 norm cpu test = "<<norm_result<<endl;
/* GPU summation */
float result_temp;
float *d_result = NULL;
hipMalloc((void**)&d_result, sizeof(float)*1);
hipMemset(d_result, 0, sizeof(float)*1);
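  // Single-block reduction: getSum_kernel is launched with one 1024-thread block and
  // 1024 floats of dynamic shared memory to sum all R*Z_prj*Nviews entries of d_a into d_result.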
hipLaunchKernelGGL(( getSum_kernel), dim3(1),dim3(1024),1024*sizeof(float), 0, d_a,d_result,R*Z_prj*Nviews);
hipMemcpy(&result_temp, d_result, sizeof(float)*1, hipMemcpyDeviceToHost);
hipFree(d_result);
norm_result=(double)result_temp;
return norm_result;
}
double TV_norm_gpu(float *d_volume)
{
size_t size_volume = sizeof(float)*M*N*ZETA;
float *d_volume_tv = NULL;
hipMalloc((void**)&d_volume_tv, size_volume);
hipMemset(d_volume_tv, 0, size_volume);
dim3 dimblock_tv(M/2);
dim3 dimgrid_tv(2,N,ZETA);
hipLaunchKernelGGL(( TV_norm_kernel), dim3(dimgrid_tv), dim3(dimblock_tv), 0, 0, d_volume_tv, d_volume);
// Note: To calculate the tv matrix
// TV norm definition : See Amir Beck's paper: Fast Gradient-based algorithms for constrained total variation image denoising and deblurring problems */
hipDeviceSynchronize();
double result = 0.0;
/*CPU summation*/
// float *volume_tv = (float *)malloc(sizeof(float)*M*N*ZETA);
// bzero(volume_tv, sizeof(float)*M*N*ZETA);
// hipMemcpy(volume_tv, d_volume_tv, size_volume, hipMemcpyDeviceToHost);
// for (int i=0; i<M*N*ZETA; i++)
// result+=(double)volume_tv[i];
// free(volume_tv);
/* GPU summation */
float *d_result = NULL;
float result_temp;
hipMalloc((void**)&d_result, sizeof(float)*1);
hipLaunchKernelGGL(( getSum_kernel), dim3(1),dim3(1024),1024*sizeof(float), 0, d_volume_tv,d_result,M*N*ZETA);
hipMemcpy(&result_temp, d_result, sizeof(float)*1, hipMemcpyDeviceToHost);
hipFree(d_result);
result = (double)result_temp;
hipFree(d_volume_tv);
return result;
}
double TV_norm_cpu(float *d_volume)
{
float* d_TV = (float *)malloc(sizeof(float)*M*N*ZETA);
bzero(d_TV,sizeof(float)*M*N*ZETA);
#pragma omp parallel for
for (int i=0; i<M; i++)
for (int j=0; j<N; j++)
for (int k=0; k<ZETA; k++)
{
int x = M*N*k+M*j+i;
int a = M*N*k+M*j+(i+1);
int b = M*N*k+M*(j+1)+i;
int c = M*N*(k+1)+M*j+i;
if ((i<M-1)&&(j<N-1)&&(k<ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[a])*(d_volume[x]-d_volume[a])+(d_volume[x]-d_volume[b])*(d_volume[x]-d_volume[b])+(d_volume[x]-d_volume[c])*(d_volume[x]-d_volume[c]) );
else if ((i==M-1)&&(j<N-1)&&(k<ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[b])*(d_volume[x]-d_volume[b])+(d_volume[x]-d_volume[c])*(d_volume[x]-d_volume[c]) );
else if ((i<M-1)&&(j==N-1)&&(k<ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[a])*(d_volume[x]-d_volume[a])+(d_volume[x]-d_volume[c])*(d_volume[x]-d_volume[c]) );
else if ((i<M-1)&&(j<N-1)&&(k==ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[a])*(d_volume[x]-d_volume[a])+(d_volume[x]-d_volume[b])*(d_volume[x]-d_volume[b]) );
else if ((i==M-1)&&(j==N-1)&&(k<ZETA-1))
d_TV[x]=abs( d_volume[x]-d_volume[c]);
else if ((i==M-1)&&(j<N-1)&&(k==ZETA-1))
d_TV[x]=abs( d_volume[x]-d_volume[b]);
else if ((i<M-1)&&(j==N-1)&&(k==ZETA-1))
d_TV[x]=abs( d_volume[x]-d_volume[a]);
}
double result = 0.0;
for (int i=0; i<M*N*ZETA; i++)
result+=d_TV[i];
free(d_TV);
return result;
}
//A function of check error, it will print out a message regarding the related information.
// Convenience function for checking CUDA runtime API results
// Can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
/*
void tv_gradient_calculate_3d_gpu_host(float *F_SART_temp, float *F_SART_gradient_tv, float epi_temp)
{
//bzero(F_SART_gradient_tv,sizeof(float)*M*N*ZETA);
// if (hipSetDevice(2)!=hipSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
float *d_volumn_f_sart = NULL;
size_t d_volumn_f_sart_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_f_sart, d_volumn_f_sart_size));
//cutilSafeCall(hipMemset(d_volume, 0, d_volume_size) );
cutilSafeCall(hipMemcpy(d_volumn_f_sart, F_SART_temp, d_volumn_f_sart_size, hipMemcpyHostToDevice) );
float *d_volumn_f_sart_gradient_tv = NULL;
size_t d_volumn_f_sart_gradient_tv_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_f_sart_gradient_tv, d_volumn_f_sart_gradient_tv_size));
cutilSafeCall(hipMemset(d_volumn_f_sart_gradient_tv, 0, d_volumn_f_sart_gradient_tv_size) );
dim3 dimblock_tv_gradient(M-2);
dim3 dimgrid_tv_gradient(N-2,ZETA-2);
//calculate the tv matrix
tv_gradient_matrix_3d_kernel<<<dimgrid_tv_gradient, dimblock_tv_gradient>>>(d_volumn_f_sart_gradient_tv, d_volumn_f_sart, epi_temp);
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(hipMemcpy(F_SART_gradient_tv, d_volumn_f_sart_gradient_tv, d_volumn_f_sart_gradient_tv_size, hipMemcpyDeviceToHost) );
cutilSafeCall( hipFree(d_volumn_f_sart));
cutilSafeCall( hipFree(d_volumn_f_sart_gradient_tv));
hipDeviceReset();
}
void backtracking_update_host(float *F_temp_update, float *F_temp, float *tv_gradient_matrix_temp, float alpha_k_temp)
{
// if (hipSetDevice(2)!=hipSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
float *d_volumn_f_update = NULL;
size_t d_volumn_f_update_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_f_update, d_volumn_f_update_size));
cutilSafeCall(hipMemcpy(d_volumn_f_update, F_temp_update, d_volumn_f_update_size, hipMemcpyHostToDevice) );
float *d_volumn_f = NULL;
size_t d_volumn_f_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_f, d_volumn_f_size));
cutilSafeCall(hipMemcpy(d_volumn_f, F_temp, d_volumn_f_size, hipMemcpyHostToDevice) );
float *d_volumn_tv_gradient_matrix = NULL;
size_t d_volumn_tv_gradient_matrix_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_tv_gradient_matrix, d_volumn_tv_gradient_matrix_size));
cutilSafeCall(hipMemcpy(d_volumn_tv_gradient_matrix, tv_gradient_matrix_temp, d_volumn_tv_gradient_matrix_size, hipMemcpyHostToDevice) );
dim3 dimblock_update(M);
dim3 dimgrid_update(N,ZETA);
//calculate the tv matrix
backtracking_update_kernel<<<dimgrid_update, dimblock_update>>>(d_volumn_f_update, d_volumn_f, d_volumn_tv_gradient_matrix, alpha_k_temp);
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(hipMemcpy(F_temp_update, d_volumn_f_update, d_volumn_f_size, hipMemcpyDeviceToHost) );
cutilSafeCall( hipFree(d_volumn_f_update));
cutilSafeCall( hipFree(d_volumn_f));
cutilSafeCall( hipFree(d_volumn_tv_gradient_matrix));
hipDeviceReset();
}
float gradient_f_norm_host(float *tv_gradient_matrix_temp)
{
// if (hipSetDevice(2)!=hipSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
float *d_volumn_f = NULL;
size_t d_volumn_f_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_f, d_volumn_f_size));
//cutilSafeCall(hipMemset(d_volume, 0, d_volume_size) );
cutilSafeCall(hipMemcpy(d_volumn_f, tv_gradient_matrix_temp, d_volumn_f_size, hipMemcpyHostToDevice) );
float *d_volumn_df_l1 = NULL;
size_t d_volumn_df_l1_size = sizeof(float)*N*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_df_l1, d_volumn_df_l1_size));
cutilSafeCall(hipMemset(d_volumn_df_l1, 0, d_volumn_df_l1_size) );
float *d_volumn_df_l2 = NULL;
size_t d_volumn_df_l2_size = sizeof(float)*ZETA;
cutilSafeCall(hipMalloc((void**)&d_volumn_df_l2, d_volumn_df_l2_size));
cutilSafeCall(hipMemset(d_volumn_df_l2, 0, d_volumn_df_l2_size) );
float *d_volumn_df_sum = NULL;
size_t d_volumn_df_sum_size = sizeof(float)*1;
cutilSafeCall(hipMalloc((void**)&d_volumn_df_sum, d_volumn_df_sum_size));
cutilSafeCall(hipMemset(d_volumn_df_sum, 0, d_volumn_df_sum_size) );
float *norm_result_temp = (float *)malloc(sizeof(float)*1);
norm_result_temp[0] = 0.0f;
float norm_result = 0.0f;
dim3 dimblock_norm_l1(M,1,1);
dim3 dimgrid_norm_l1(N,ZETA,1);
dim3 dimblock_norm_l2(N,1,1);
dim3 dimgrid_norm_l2(ZETA,1,1);
dim3 dimblock_norm_sum(ZETA,1,1);
dim3 dimgrid_norm_sum(1,1,1);
//calculate the norm_2
reduce_norm_2_kernel_l1<<<dimgrid_norm_l1, dimblock_norm_l1, sizeof(float)*M>>>(d_volumn_f, d_volumn_df_l1, M*N*ZETA);
reduce_norm_2_kernel_l2<<<dimgrid_norm_l2, dimblock_norm_l2, sizeof(float)*N>>>(d_volumn_df_l1, d_volumn_df_l2, N*ZETA);
reduce_norm_2_kernel_end<<<dimgrid_norm_sum, dimblock_norm_sum, sizeof(float)*ZETA>>>(d_volumn_df_l2, d_volumn_df_sum, ZETA);
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(hipMemcpy(norm_result_temp, d_volumn_df_sum, d_volumn_df_sum_size, hipMemcpyDeviceToHost) );
norm_result = norm_result_temp[0];
//printf("TV value calculation one time \n");
//printf( "Processing time: %f (ms)\n", cutGetTimerValue(timer) );
cutilSafeCall(hipFree(d_volumn_f));
cutilSafeCall( hipFree(d_volumn_df_l1));
cutilSafeCall( hipFree(d_volumn_df_l2));
cutilSafeCall( hipFree(d_volumn_df_sum));
hipDeviceReset();
free(norm_result_temp);
return norm_result;
}
void line_search_host(float *F_SART_temp, float *F_SART_TV_temp, float epi_temp)
{
// if (hipSetDevice(2)!=hipSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
float alpha_k = 1.0f;
float rho = 0.5f;
float c = 0.0001f;
//float epi_temp = 1.0e-8f;
float c_alpha_f_pk = 0.0f;
float f_pk = 0.0f;
//copy the f_sart to f_sart_tv
//then start to do the line search
memcpy(F_SART_TV_temp, F_SART_temp, sizeof(float)*M*N*ZETA);
float *tv_gradient_matrix =(float *)malloc(sizeof(float)*M*N*ZETA);
bzero(tv_gradient_matrix,sizeof(float)*M*N*ZETA);
//calculate the tv gradient matrix
tv_gradient_calculate_3d_gpu_host(F_SART_temp, tv_gradient_matrix, epi_temp);
float tv_value_old;
float tv_value_new;
tv_value_old = tv_value_calculate_3d_gpu_host(F_SART_temp);
backtracking_update_host(F_SART_TV_temp, F_SART_TV_temp, tv_gradient_matrix, alpha_k);
tv_value_new = tv_value_calculate_3d_gpu_host(F_SART_TV_temp);
f_pk =-gradient_f_norm_host(tv_gradient_matrix);
c_alpha_f_pk = c*alpha_k*f_pk;
while (tv_value_new > (tv_value_old + c_alpha_f_pk) )
{
alpha_k = alpha_k *rho;
c_alpha_f_pk = c*alpha_k*f_pk;
backtracking_update_host(F_SART_TV_temp, F_SART_temp, tv_gradient_matrix, alpha_k);
//for(int i=0;i<f_size;i++)
// f_sart_tv[i] = f_sart[i] - alpha_k*tv_gradient_matrix[i];
tv_value_new = tv_value_calculate_3d_gpu_host(F_SART_TV_temp);
}
cutilCheckError(cutStopTimer(timer));
printf("Line search one time \n");
printf( "Processing time: %f (ms)\n", cutGetTimerValue(timer) );
free(tv_gradient_matrix);
}
*/ | 4d69492853dbbc763deb17eaa99582acba54c93c.cu |
void SaveDeviceDataToFile(float* d_data, int data_size, char filename[])
{
float* data = (float *)malloc(sizeof(float)*data_size);
cudaMemcpy(data,d_data,sizeof(float)*data_size,cudaMemcpyDefault);
FILE *fp;
if ( (fp = fopen(filename,"wb")) == NULL )
{
printf("can not open file to write data %s\n",filename);
exit(0);
}
fwrite(data,sizeof(float)*data_size,1,fp);
fclose(fp);
printf("Data saved in file %s \n",filename);
}
double L2_norm_cpu(const float *a, const float *b, int SIZE)
{
double sum=0;
for (int i=0;i<SIZE;i++)
sum = sum + (a[i]-b[i])*(a[i]-b[i]);
//sum = sqrt(sum);
return sum;
}
double L2_norm_gpu(float *d_a, float *d_b) // h_b is pinned host memory
{
double norm_result = 0.0f;
dim3 dimBlock_sub(R/2);
dim3 dimGrid_sub(2,Z_prj,Nviews);
L2_norm_kernel<<<dimGrid_sub, dimBlock_sub>>>(d_a, d_b);
/* CPU summation */
// float *norm_result_temp = (float *)malloc(sizeof(float)*R*Z_prj*Nviews);
// bzero(norm_result_temp, sizeof(float)*R*Z_prj*Nviews);
// cudaMemcpy(norm_result_temp, d_a, d_proj_data_size, cudaMemcpyDeviceToHost);
// for (int i=0;i<Z_prj*R*Nviews; i++)
// norm_result = norm_result + (double)norm_result_temp[i];
// free(norm_result_temp);
// cout<<" * L2 norm cpu test = "<<norm_result<<endl;
/* GPU summation */
float result_temp;
float *d_result = NULL;
cudaMalloc((void**)&d_result, sizeof(float)*1);
cudaMemset(d_result, 0, sizeof(float)*1);
getSum_kernel<<<1,1024,1024*sizeof(float)>>>(d_a,d_result,R*Z_prj*Nviews);
cudaMemcpy(&result_temp, d_result, sizeof(float)*1, cudaMemcpyDeviceToHost);
cudaFree(d_result);
norm_result=(double)result_temp;
return norm_result;
}
double TV_norm_gpu(float *d_volume)
{
size_t size_volume = sizeof(float)*M*N*ZETA;
float *d_volume_tv = NULL;
cudaMalloc((void**)&d_volume_tv, size_volume);
cudaMemset(d_volume_tv, 0, size_volume);
dim3 dimblock_tv(M/2);
dim3 dimgrid_tv(2,N,ZETA);
TV_norm_kernel<<<dimgrid_tv, dimblock_tv>>>(d_volume_tv, d_volume);
// Note: To calculate the tv matrix
// TV norm definition : See Amir Beck's paper: Fast Gradient-based algorithms for constrained total variation image denoising and deblurring problems */
cudaDeviceSynchronize();
double result = 0.0;
/*CPU summation*/
// float *volume_tv = (float *)malloc(sizeof(float)*M*N*ZETA);
// bzero(volume_tv, sizeof(float)*M*N*ZETA);
// cudaMemcpy(volume_tv, d_volume_tv, size_volume, cudaMemcpyDeviceToHost);
// for (int i=0; i<M*N*ZETA; i++)
// result+=(double)volume_tv[i];
// free(volume_tv);
/* GPU summation */
float *d_result = NULL;
float result_temp;
cudaMalloc((void**)&d_result, sizeof(float)*1);
getSum_kernel<<<1,1024,1024*sizeof(float)>>>(d_volume_tv,d_result,M*N*ZETA);
cudaMemcpy(&result_temp, d_result, sizeof(float)*1, cudaMemcpyDeviceToHost);
cudaFree(d_result);
result = (double)result_temp;
cudaFree(d_volume_tv);
return result;
}
double TV_norm_cpu(float *d_volume)
{
float* d_TV = (float *)malloc(sizeof(float)*M*N*ZETA);
bzero(d_TV,sizeof(float)*M*N*ZETA);
#pragma omp parallel for
for (int i=0; i<M; i++)
for (int j=0; j<N; j++)
for (int k=0; k<ZETA; k++)
{
int x = M*N*k+M*j+i;
int a = M*N*k+M*j+(i+1);
int b = M*N*k+M*(j+1)+i;
int c = M*N*(k+1)+M*j+i;
if ((i<M-1)&&(j<N-1)&&(k<ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[a])*(d_volume[x]-d_volume[a])+(d_volume[x]-d_volume[b])*(d_volume[x]-d_volume[b])+(d_volume[x]-d_volume[c])*(d_volume[x]-d_volume[c]) );
else if ((i==M-1)&&(j<N-1)&&(k<ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[b])*(d_volume[x]-d_volume[b])+(d_volume[x]-d_volume[c])*(d_volume[x]-d_volume[c]) );
else if ((i<M-1)&&(j==N-1)&&(k<ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[a])*(d_volume[x]-d_volume[a])+(d_volume[x]-d_volume[c])*(d_volume[x]-d_volume[c]) );
else if ((i<M-1)&&(j<N-1)&&(k==ZETA-1))
d_TV[x]=sqrt( (d_volume[x]-d_volume[a])*(d_volume[x]-d_volume[a])+(d_volume[x]-d_volume[b])*(d_volume[x]-d_volume[b]) );
else if ((i==M-1)&&(j==N-1)&&(k<ZETA-1))
d_TV[x]=abs( d_volume[x]-d_volume[c]);
else if ((i==M-1)&&(j<N-1)&&(k==ZETA-1))
d_TV[x]=abs( d_volume[x]-d_volume[b]);
else if ((i<M-1)&&(j==N-1)&&(k==ZETA-1))
d_TV[x]=abs( d_volume[x]-d_volume[a]);
}
double result = 0.0;
for (int i=0; i<M*N*ZETA; i++)
result+=d_TV[i];
free(d_TV);
return result;
}
//A function of check error, it will print out a message regarding the related information.
// Convenience function for checking CUDA runtime API results
// Can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
/*
void tv_gradient_calculate_3d_gpu_host(float *F_SART_temp, float *F_SART_gradient_tv, float epi_temp)
{
//bzero(F_SART_gradient_tv,sizeof(float)*M*N*ZETA);
// if (cudaSetDevice(2)!=cudaSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
float *d_volumn_f_sart = NULL;
size_t d_volumn_f_sart_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_f_sart, d_volumn_f_sart_size));
//cutilSafeCall(cudaMemset(d_volume, 0, d_volume_size) );
cutilSafeCall(cudaMemcpy(d_volumn_f_sart, F_SART_temp, d_volumn_f_sart_size, cudaMemcpyHostToDevice) );
float *d_volumn_f_sart_gradient_tv = NULL;
size_t d_volumn_f_sart_gradient_tv_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_f_sart_gradient_tv, d_volumn_f_sart_gradient_tv_size));
cutilSafeCall(cudaMemset(d_volumn_f_sart_gradient_tv, 0, d_volumn_f_sart_gradient_tv_size) );
dim3 dimblock_tv_gradient(M-2);
dim3 dimgrid_tv_gradient(N-2,ZETA-2);
//calculate the tv matrix
tv_gradient_matrix_3d_kernel<<<dimgrid_tv_gradient, dimblock_tv_gradient>>>(d_volumn_f_sart_gradient_tv, d_volumn_f_sart, epi_temp);
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(cudaMemcpy(F_SART_gradient_tv, d_volumn_f_sart_gradient_tv, d_volumn_f_sart_gradient_tv_size, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaFree(d_volumn_f_sart));
cutilSafeCall( cudaFree(d_volumn_f_sart_gradient_tv));
cudaThreadExit();
}
void backtracking_update_host(float *F_temp_update, float *F_temp, float *tv_gradient_matrix_temp, float alpha_k_temp)
{
// if (cudaSetDevice(2)!=cudaSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
float *d_volumn_f_update = NULL;
size_t d_volumn_f_update_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_f_update, d_volumn_f_update_size));
cutilSafeCall(cudaMemcpy(d_volumn_f_update, F_temp_update, d_volumn_f_update_size, cudaMemcpyHostToDevice) );
float *d_volumn_f = NULL;
size_t d_volumn_f_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_f, d_volumn_f_size));
cutilSafeCall(cudaMemcpy(d_volumn_f, F_temp, d_volumn_f_size, cudaMemcpyHostToDevice) );
float *d_volumn_tv_gradient_matrix = NULL;
size_t d_volumn_tv_gradient_matrix_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_tv_gradient_matrix, d_volumn_tv_gradient_matrix_size));
cutilSafeCall(cudaMemcpy(d_volumn_tv_gradient_matrix, tv_gradient_matrix_temp, d_volumn_tv_gradient_matrix_size, cudaMemcpyHostToDevice) );
dim3 dimblock_update(M);
dim3 dimgrid_update(N,ZETA);
//calculate the tv matrix
backtracking_update_kernel<<<dimgrid_update, dimblock_update>>>(d_volumn_f_update, d_volumn_f, d_volumn_tv_gradient_matrix, alpha_k_temp);
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(cudaMemcpy(F_temp_update, d_volumn_f_update, d_volumn_f_size, cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaFree(d_volumn_f_update));
cutilSafeCall( cudaFree(d_volumn_f));
cutilSafeCall( cudaFree(d_volumn_tv_gradient_matrix));
cudaThreadExit();
}
float gradient_f_norm_host(float *tv_gradient_matrix_temp)
{
// if (cudaSetDevice(2)!=cudaSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
float *d_volumn_f = NULL;
size_t d_volumn_f_size = sizeof(float)*M*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_f, d_volumn_f_size));
//cutilSafeCall(cudaMemset(d_volume, 0, d_volume_size) );
cutilSafeCall(cudaMemcpy(d_volumn_f, tv_gradient_matrix_temp, d_volumn_f_size, cudaMemcpyHostToDevice) );
float *d_volumn_df_l1 = NULL;
size_t d_volumn_df_l1_size = sizeof(float)*N*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_df_l1, d_volumn_df_l1_size));
cutilSafeCall(cudaMemset(d_volumn_df_l1, 0, d_volumn_df_l1_size) );
float *d_volumn_df_l2 = NULL;
size_t d_volumn_df_l2_size = sizeof(float)*ZETA;
cutilSafeCall(cudaMalloc((void**)&d_volumn_df_l2, d_volumn_df_l2_size));
cutilSafeCall(cudaMemset(d_volumn_df_l2, 0, d_volumn_df_l2_size) );
float *d_volumn_df_sum = NULL;
size_t d_volumn_df_sum_size = sizeof(float)*1;
cutilSafeCall(cudaMalloc((void**)&d_volumn_df_sum, d_volumn_df_sum_size));
cutilSafeCall(cudaMemset(d_volumn_df_sum, 0, d_volumn_df_sum_size) );
float *norm_result_temp = (float *)malloc(sizeof(float)*1);
norm_result_temp[0] = 0.0f;
float norm_result = 0.0f;
dim3 dimblock_norm_l1(M,1,1);
dim3 dimgrid_norm_l1(N,ZETA,1);
dim3 dimblock_norm_l2(N,1,1);
dim3 dimgrid_norm_l2(ZETA,1,1);
dim3 dimblock_norm_sum(ZETA,1,1);
dim3 dimgrid_norm_sum(1,1,1);
//calculate the norm_2
reduce_norm_2_kernel_l1<<<dimgrid_norm_l1, dimblock_norm_l1, sizeof(float)*M>>>(d_volumn_f, d_volumn_df_l1, M*N*ZETA);
reduce_norm_2_kernel_l2<<<dimgrid_norm_l2, dimblock_norm_l2, sizeof(float)*N>>>(d_volumn_df_l1, d_volumn_df_l2, N*ZETA);
reduce_norm_2_kernel_end<<<dimgrid_norm_sum, dimblock_norm_sum, sizeof(float)*ZETA>>>(d_volumn_df_l2, d_volumn_df_sum, ZETA);
cutilCheckMsg("Kernel execution failed");
cutilSafeCall(cudaMemcpy(norm_result_temp, d_volumn_df_sum, d_volumn_df_sum_size, cudaMemcpyDeviceToHost) );
norm_result = norm_result_temp[0];
//printf("TV value calculation one time \n");
//printf( "Processing time: %f (ms)\n", cutGetTimerValue(timer) );
cutilSafeCall(cudaFree(d_volumn_f));
cutilSafeCall( cudaFree(d_volumn_df_l1));
cutilSafeCall( cudaFree(d_volumn_df_l2));
cutilSafeCall( cudaFree(d_volumn_df_sum));
cudaThreadExit();
free(norm_result_temp);
return norm_result;
}
void line_search_host(float *F_SART_temp, float *F_SART_TV_temp, float epi_temp)
{
// if (cudaSetDevice(2)!=cudaSuccess)
//{
// std::cout<<"Error when initializing device6!"<<std::endl;
// exit(-1);
//}
unsigned int timer = 0;
cutilCheckError( cutCreateTimer( &timer));
cutilCheckError( cutStartTimer( timer));
float alpha_k = 1.0f;
float rho = 0.5f;
float c = 0.0001f;
//float epi_temp = 1.0e-8f;
float c_alpha_f_pk = 0.0f;
float f_pk = 0.0f;
//copy the f_sart to f_sart_tv
//then start to do the line search
memcpy(F_SART_TV_temp, F_SART_temp, sizeof(float)*M*N*ZETA);
float *tv_gradient_matrix =(float *)malloc(sizeof(float)*M*N*ZETA);
bzero(tv_gradient_matrix,sizeof(float)*M*N*ZETA);
//calculate the tv gradient matrix
tv_gradient_calculate_3d_gpu_host(F_SART_temp, tv_gradient_matrix, epi_temp);
float tv_value_old;
float tv_value_new;
tv_value_old = tv_value_calculate_3d_gpu_host(F_SART_temp);
backtracking_update_host(F_SART_TV_temp, F_SART_TV_temp, tv_gradient_matrix, alpha_k);
tv_value_new = tv_value_calculate_3d_gpu_host(F_SART_TV_temp);
f_pk =-gradient_f_norm_host(tv_gradient_matrix);
c_alpha_f_pk = c*alpha_k*f_pk;
while (tv_value_new > (tv_value_old + c_alpha_f_pk) )
{
alpha_k = alpha_k *rho;
c_alpha_f_pk = c*alpha_k*f_pk;
backtracking_update_host(F_SART_TV_temp, F_SART_temp, tv_gradient_matrix, alpha_k);
//for(int i=0;i<f_size;i++)
// f_sart_tv[i] = f_sart[i] - alpha_k*tv_gradient_matrix[i];
tv_value_new = tv_value_calculate_3d_gpu_host(F_SART_TV_temp);
}
cutilCheckError(cutStopTimer(timer));
printf("Line search one time \n");
printf( "Processing time: %f (ms)\n", cutGetTimerValue(timer) );
free(tv_gradient_matrix);
}
*/ |
c69162c4d06cd3aaddc54d9e1e322e0051b51dc0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "remove_cols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *deleted_cols = NULL;
hipMalloc(&deleted_cols, XSIZE*YSIZE);
int *col_group = NULL;
hipMalloc(&col_group, XSIZE*YSIZE);
const int conflict_col_id = 1;
const int total_dl_matrix_col_num = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
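		// Warm-up phase: hipFree(0) forces HIP context creation, then the kernel is launched
		// once (plus ten more untimed iterations) before the 1000 timed launches below.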
		hipFree(0);
		hipLaunchKernelGGL(remove_cols, dim3(gridBlock), dim3(threadBlock), 0, 0, deleted_cols, col_group, conflict_col_id, total_dl_matrix_col_num);
		hipDeviceSynchronize();
		for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
			hipLaunchKernelGGL(remove_cols, dim3(gridBlock), dim3(threadBlock), 0, 0, deleted_cols, col_group, conflict_col_id, total_dl_matrix_col_num);
		}
		auto start = steady_clock::now();
		for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
			hipLaunchKernelGGL(remove_cols, dim3(gridBlock), dim3(threadBlock), 0, 0, deleted_cols, col_group, conflict_col_id, total_dl_matrix_col_num);
		}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c69162c4d06cd3aaddc54d9e1e322e0051b51dc0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "remove_cols.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *deleted_cols = NULL;
cudaMalloc(&deleted_cols, XSIZE*YSIZE);
int *col_group = NULL;
cudaMalloc(&col_group, XSIZE*YSIZE);
const int conflict_col_id = 1;
const int total_dl_matrix_col_num = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
remove_cols<<<gridBlock,threadBlock>>>(deleted_cols,col_group,conflict_col_id,total_dl_matrix_col_num);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
remove_cols<<<gridBlock,threadBlock>>>(deleted_cols,col_group,conflict_col_id,total_dl_matrix_col_num);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
remove_cols<<<gridBlock,threadBlock>>>(deleted_cols,col_group,conflict_col_id,total_dl_matrix_col_num);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a3a3e5f38d53d7dcd66607bbc1e85378ceff1f20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_hdvmax.cuh"
__global__ void hyperdifviscmax_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order, real *wtemp, int field, int dim)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
j=iindex/ni;
//i=iindex-j*(iindex/ni);
i=iindex-(j*ni);
p->maxviscoef=0;
if(i<((p->n[0])) && j<((p->n[1])))
{
if(wd[fencode_hdvmax(p,i,j,hdnur)]>(p->maxviscoef))
p->maxviscoef=wd[fencode_hdvmax(p,i,j,hdnur)];
if(wd[fencode_hdvmax(p,i,j,hdnul)]>(p->maxviscoef))
p->maxviscoef=wd[fencode_hdvmax(p,i,j,hdnul)];
}
__syncthreads();
/* if(i<((p->n[0])) && j<((p->n[1])))
{
if(wd[fencode_hdvmax(p,i,j,hdnul)]>(p->maxviscoef))
p->maxviscoef=wd[fencode_hdvmax(p,i,j,hdnul)];
}
__syncthreads();*/
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdvmax(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuhyperdifviscmax(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, real **d_wtemp, int field, int dim)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
hipLaunchKernelGGL(( hyperdifviscmax_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order, *d_wtemp, field, dim);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_hdv,*d_u,*d_v,*d_h);
//printf("called prop\n");
hipDeviceSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_hdv,*d_w,*d_wnew);
//printf("called boundary\n");
//hipDeviceSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_hdv,*d_w,*d_wnew);
//printf("called update\n");
// hipDeviceSynchronize();
// hipMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), hipMemcpyDeviceToHost);
//hipMemcpy(*b, *d_hdv, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), hipMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
| a3a3e5f38d53d7dcd66607bbc1e85378ceff1f20.cu | #include "cudapars.h"
#include "paramssteeringtest1.h"
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include "step.h"
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
#include "gradops_hdvmax.cuh"
__global__ void hyperdifviscmax_parallel(struct params *p, real *w, real *wnew, real *wmod,
real *dwn1, real *wd, int order, real *wtemp, int field, int dim)
{
// compute the global index in the vector from
// the number of the current block, blockIdx,
// the number of threads per block, blockDim,
// and the number of the current thread within the block, threadIdx
//int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.y * blockDim.y + threadIdx.y;
int iindex = blockIdx.x * blockDim.x + threadIdx.x;
int i,j;
int is,js;
int index,k;
int ni=p->n[0];
int nj=p->n[1];
real dt=p->dt;
real dy=p->dx[1];
real dx=p->dx[0];
//real g=p->g;
// dt=1.0;
//dt=0.05;
//enum vars rho, mom1, mom2, mom3, energy, b1, b2, b3;
real maxt=0,max3=0, max1=0;
j=iindex/ni;
//i=iindex-j*(iindex/ni);
i=iindex-(j*ni);
p->maxviscoef=0;
if(i<((p->n[0])) && j<((p->n[1])))
{
if(wd[fencode_hdvmax(p,i,j,hdnur)]>(p->maxviscoef))
p->maxviscoef=wd[fencode_hdvmax(p,i,j,hdnur)];
if(wd[fencode_hdvmax(p,i,j,hdnul)]>(p->maxviscoef))
p->maxviscoef=wd[fencode_hdvmax(p,i,j,hdnul)];
}
__syncthreads();
/* if(i<((p->n[0])) && j<((p->n[1])))
{
if(wd[fencode_hdvmax(p,i,j,hdnul)]>(p->maxviscoef))
p->maxviscoef=wd[fencode_hdvmax(p,i,j,hdnul)];
}
__syncthreads();*/
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
void checkErrors_hdvmax(char *label)
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
int cuhyperdifviscmax(struct params **p, real **w, real **wnew, struct params **d_p, real **d_w, real **d_wnew, real **d_wmod, real **d_dwn1, real **d_wd, int order, real **d_wtemp, int field, int dim)
{
//printf("calling propagate solution\n");
//dim3 dimBlock(blocksize, blocksize);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimBlock(dimblock, 1);
//dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
dim3 dimGrid(((*p)->n[0])/dimBlock.x,((*p)->n[1])/dimBlock.y);
int numBlocks = (((*p)->n[0])*((*p)->n[1])+numThreadsPerBlock-1) / numThreadsPerBlock;
//__global__ void prop_parallel(struct params *p, real *b, real *w, real *wnew, real *wmod,
// real *dwn1, real *dwn2, real *dwn3, real *dwn4, real *wd)
//init_parallel(struct params *p, real *b, real *u, real *v, real *h)
hyperdifviscmax_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_w,*d_wnew, *d_wmod, *d_dwn1, *d_wd, order, *d_wtemp, field, dim);
//prop_parallel<<<dimGrid,dimBlock>>>(*d_p,*d_hdv,*d_u,*d_v,*d_h);
//printf("called prop\n");
cudaThreadSynchronize();
//boundary_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_hdv,*d_w,*d_wnew);
//printf("called boundary\n");
//cudaThreadSynchronize();
//update_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p,*d_hdv,*d_w,*d_wnew);
//printf("called update\n");
// cudaThreadSynchronize();
// cudaMemcpy(*w, *d_w, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*wnew, *d_wnew, NVAR*((*p)->n[0])* ((*p)->n[1])*sizeof(real), cudaMemcpyDeviceToHost);
//cudaMemcpy(*b, *d_hdv, (((*p)->n[0])* ((*p)->n[1]))*sizeof(real), cudaMemcpyDeviceToHost);
//checkErrors("copy data from device");
}
|
62408c9922596eec277bddb0e03bfca9be48c1d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
#pragma pack (1)
//set 1-byte struct packing (no padding)
#define GauSize 5
typedef struct{
	short type;         // file type, must be "BM"
	int size;           // size of the whole bitmap file, in bytes
	short reserved1;    // reserved, must be 0
	short reserved2;    // reserved, must be 0
	int offset;         // offset to the start of the pixel data, in bytes
}BMPHeader;
typedef struct
{
	int size;               // size of the BMP info header, in bytes
	int width;              // image width, in pixels
	int height;             // image height, in pixels
	short planes;           // number of planes, normally 1
	short bitsPerPixel;     // bits per pixel
	unsigned compression;   // compression type: 0 = none, 1 = BI_RLE8, 2 = BI_RLE4
	unsigned imageSize;     // size of the pixel data, in bytes
	int xPelsPerMeter;      // horizontal resolution in pixels per meter, may be 0
	int yPelsPerMeter;      // vertical resolution, same convention as above
	int clrUsed;            // number of palette colors actually used, 0 = all
	int clrImportant;       // number of important colors for display, 0 = all important
}BMPInfoHeader;
// typedef struct {
// unsigned char x,y,z; // 24-bit BMP stores each pixel as b,g,r and pads every row with zeros to a multiple of 4 bytes
// }uchar3;
void loadBMPFile(uchar3 ** dst, int *width , int *height , const char *name){
BMPHeader hdr;
BMPInfoHeader infoHdr;
int x,y;
FILE *fd ;
printf("Loading %s ...\n", name);
if(sizeof(uchar3) !=3){
printf("***** uchaar3 is not 3 bytes *****\n");
return ;
}
if( !(fd = fopen(name, "rb"))){
printf("***** fail to open %s *****\n", name);
return ;
}
fread(&hdr , sizeof(hdr) , 1, fd);
if(hdr.type != 0x4d42){
printf("***** it is not a bmp file *****\n");
return ;
}
fread(&infoHdr , sizeof(infoHdr) , 1, fd);
if(infoHdr.bitsPerPixel !=24){
printf("***** invalid color depth (24 bits needed) *****\n");
printf("It is %hd\n" , infoHdr.bitsPerPixel);
printf("size of short : %d\n",sizeof(short));
printf("size of hdr is %d , infoHdr is %d\n" , sizeof(hdr) , sizeof(infoHdr));
return;
}
printf("size of short : %d\n",sizeof(short));
printf("size of hdr is %d , infoHdr is %d\n" , sizeof(hdr) , sizeof(infoHdr));
if(infoHdr.compression){
printf("***** cannot solve copressed image *****\n");
return ;
}
*width = infoHdr.width;
*height = infoHdr.height;
//malloc space for image data
*dst = (uchar3 *)malloc(*width * *height * sizeof(uchar3));
// hipMallocManaged(dst , *width * *height * sizeof(uchar3));
printf("image width: %u\n", infoHdr.width);
printf("image height: %u\n", infoHdr.height);
fseek(fd, hdr.offset - sizeof(hdr) - sizeof(infoHdr) , SEEK_CUR);
for(y = 0 ; y < infoHdr.height ; y++){
for(x = 0 ; x < infoHdr.width ; x++){
(*dst)[ y * infoHdr.width + x ].z = fgetc(fd);
(*dst)[ y * infoHdr.width + x ].y = fgetc(fd);
(*dst)[ y * infoHdr.width + x ].x = fgetc(fd);
}
//pass filling bytes
for(x = 0 ; x < (4 - (3*infoHdr.width)%4)%4 ; x++ ){
fgetc(fd);
}
}
printf("image file loaded successful! \n");
fclose(fd);
}
void saveBMPFile(uchar3 *dst , int width , int height , const char *name){
printf("in save bmp file\n");
BMPHeader hdr;
BMPInfoHeader infoHdr;
hdr.type = 0x4d42;
hdr.reserved1 = 0;
hdr.reserved2 = 0;
hdr.offset = 54;
hdr.size = hdr.offset + 3 * height * (width + (4-(width*3)%4)%4 );
infoHdr.size = 40;
infoHdr.width = width;
infoHdr.height = height;
infoHdr.planes = 1;
infoHdr.bitsPerPixel = 24;
infoHdr.compression = 0;
infoHdr.imageSize = hdr.size - hdr.offset;
infoHdr.xPelsPerMeter = 0;
infoHdr.yPelsPerMeter = 0;
infoHdr.clrImportant = 0;
infoHdr.clrUsed = 0;
FILE *fd;
if(! (fd=fopen(name,"wb"))){
printf("***** fail to open dest file *****\n");
return;
}
fwrite(&hdr , sizeof(hdr) , 1 , fd);
fwrite(&infoHdr , sizeof(infoHdr) , 1 , fd);
int x,y;
for(y = 0; y < infoHdr.height ; y++){
for(x = 0 ; x < infoHdr.width ; x++){
fputc( dst[ y * infoHdr.width + x ].z , fd);
fputc( dst[ y * infoHdr.width + x ].y , fd);
fputc( dst[ y * infoHdr.width + x ].x , fd);
// printf("save one pixel\n");
}
//pass filling bytes
for(x = 0 ; x < (4 - (3*infoHdr.width)%4)%4 ; x++ ){
fputc(0 , fd);
}
}
printf("image file writed over\n");
fclose(fd);
}
__global__ void genGauss(float *gauss , int size , float sig ){
__shared__ float sum;
sum=0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int mid = size/2 ;
float sigma = sig;
if(x<size && y<size){
gauss[y*size + x] = __expf(-0.5*((x - mid)*(x - mid)+(y - mid)*(y - mid)) / (sigma*sigma)) / (2 * 3.1415926 * sigma *sigma);
atomicAdd(&sum , gauss[y*size + x]);
}
__syncthreads();
if(x<size && y<size){
gauss[y*size + x] = gauss[y*size + x] /sum;
}
}
__device__ float cannyRgb2grayscale(uchar3 pix){
return (pix.x*0.299 + pix.y*0.587 + pix.z*0.114);
}
__global__ void rgb2grayscale(uchar3 * pix , int width , int height , float * ping){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x < width && y < height){
ping[index] = pix[index].x*0.299 + pix[index].y*0.587 + pix[index].z*0.114;
}
}
__global__ void gaussBlur(float * ping, float * pang , int width , int height , float * gauss , int gaussSize){
__shared__ float gau[GauSize][GauSize];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
	//load gauss filter coefficients into shared memory
if(threadIdx.x<GauSize && threadIdx.y<GauSize){
gau[threadIdx.y][threadIdx.x] = gauss[ threadIdx.y * GauSize + threadIdx.x];
}
__syncthreads();
//blur
if(x<width && y<height ){
float blurValue = 0.0f;
int mid = GauSize/2 +1;
for(int i = mid-GauSize ; i <= GauSize-mid; i++){
for(int j = mid-GauSize ; j <= GauSize-mid; j++){
// blurValue += gau[i+mid-1][j+mid-1]*dst[(y+i)*width + x+j].x;
if((i+y)<height && (i+y)>=0 && (j+x)<width && (j+x)>=0 )
blurValue += gau[i+mid-1][j+mid-1] * ping[index + i*width +j];
}
}
// src[index].x = src[index].y = src[index].z = blurValue;
pang[index] = blurValue;
}
}
__global__ void calcGradient(float *pang,float *ping , float * dx , float *dy , int width , int height){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x<width-1 && y< height-1){
dx[index] = pang[index + 1] - pang[index];
dy[index] = pang[index + width] - pang[index];
//ping is gradient now
ping[index] = sqrtf(dx[index]*dx[index]+dy[index]*dy[index] );
}
if(x<width && y< height && (x==width-1 || y==height-1)){
ping[index] = 0;
}
}
__global__ void nonMaxRestrain(float *pang, float *ping , float *dx , float *dy , int width , int height){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x<width -2 && y<height -2 && x>0 &&y>0){
if(pang[index] <= 0.0000000001f && pang[index] >= -0.0000000001f){
ping[index] = 0.0f;
}
else{
float dxAbs = abs(dx[index]);
float dyAbs = abs(dy[index]);
float weight = 0;
float grad[4];
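			// Non-maximum suppression: interpolate the two neighbouring magnitudes along the
			// gradient direction (weighted by dx/dy) and keep this pixel only if it exceeds both.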
if(dyAbs > dxAbs){
weight = dxAbs/dyAbs;
grad[1] = pang[index - width];
grad[3] = pang[index + width];
if(dx[index]*dy[index]>0){
grad[0] = pang[index -width - 1];
grad[2] = pang[index +width + 1];
}
else{
grad[0] = pang[index -width + 1];
grad[2] = pang[index +width - 1];
}
}else{
weight = dyAbs / dxAbs;
grad[1] = pang[index - 1];
grad[3] = pang[index + 1];
if(dx[index]*dy[index]>0){
// grad[0] = pang[index +width - 1];
// grad[2] = pang[index -width + 1];
grad[0] = pang[index -width - 1];
grad[2] = pang[index +width + 1];
}
else{
// grad[0] = pang[index -width - 1];
// grad[2] = pang[index +width + 1];
grad[0] = pang[index -width + 1];
grad[2] = pang[index +width - 1];
}
}
//
// float grad1 = weight * grad[0] + (1-weight) * grad[1];
// float grad2 = weight * grad[2] + (1-weight) * grad[3];
float grad1 = weight * grad[1] + (1-weight) * grad[0];
float grad2 = weight * grad[3] + (1-weight) * grad[2];
if(pang[index] > grad1 && pang[index] > grad2){
ping[index] = pang[index];
} else{
ping[index] = 0.0f;
}
}
}//x<width y<height
// else{
// ping[index] = 0;
// }
}
__global__ void findMaxGrad(float * ping , int width , int height , float * max){
extern __shared__ float temp[];
int tid = threadIdx.x;
int idx = tid + blockDim.x * blockIdx.x;
if(idx<width*height){
temp[tid] = ping[idx];
}else{
temp[tid] = 0;
}
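	// Shared-memory tree reduction for the block-wise maximum; the prevSize bookkeeping
	// also covers block sizes that are not powers of two.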
int prevSize = blockDim.x;
for(int d = blockDim.x >>1 ; d>0; d >>= 1){
__syncthreads();
if(tid<d ){
temp[tid] = temp[tid]>temp[tid+d]?temp[tid]:temp[tid + d];
//for arbitrary block size
if(prevSize > d*2 && (tid+2*d)<prevSize ){
temp[tid] = temp[tid]>temp[tid+2*d]?temp[tid]:temp[tid + 2*d];
}
prevSize = d;
}
}
if(tid==0)max[blockIdx.x] = temp[0];
}
__global__ void edgeTrace(float *ping , float *pang , float *maxptr , int width , int height , int iteration , float hsigma , float lsigma){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
float max = *maxptr;
float lhd = max * lsigma;
float hhd = max * hsigma;
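	// Hysteresis thresholding: values below lhd are dropped, values above hhd are edges,
	// and in-between pixels become edges only if an 8-neighbour is already strong;
	// iterating lets accepted edges propagate across the image.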
for(int i =0 ; i<iteration ;i++){
__syncthreads();
if(x<width -1 && y<height -1 && x>0 && y>0){
if(ping[index] < lhd)pang[index] = 0;
else if(ping[index]>= hhd)pang[index]=1;
else {
if(ping[index-width-1]>=hhd || ping[index-width]>=hhd ||ping[index-width+1]>=hhd
|| ping[index-1]>=hhd || ping[index+1]>=hhd
|| ping[index+width-1]>=hhd || ping[index+width]>=hhd ||ping[index+width+1]>=hhd){
pang[index] = 1;
ping[index] = hhd;
}else{
pang[index] = 0;
ping[index] = hhd;
}
}
}//x, y
}
}
__global__ void edgeTraceOptimized(float *ping , float *pang , float *maxptr , int width , int height , int iteration , float hsigma , float lsigma){
__shared__ float tempPing[34][34] ;
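	// 34x34 tile = the 32x32 thread block plus a one-pixel halo on every side, so the
	// hysteresis test can read all 8 neighbours from shared memory instead of global memory.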
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
float max = *maxptr;
float lhd = max * lsigma;
float hhd = max * hsigma;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
if(x<width && y < height)tempPing[threadIdx.y +1][threadIdx.x +1] = ping[index];
if(threadIdx.x ==0 ){
if(x>0 && x<width && y<height){
tempPing[threadIdx.y +1][0] = ping[index - 1];
}
else{
tempPing[threadIdx.y +1][0] = 0;
}
}
if(threadIdx.y ==0){
if(y>0 && x<width && y<height){
tempPing[0][threadIdx.x +1] = ping[index - width];
}
else {
tempPing[0][threadIdx.x +1] = 0;
}
}
if(threadIdx.x == 31){
if(x<width-1 && y<height){
tempPing[threadIdx.y +1][33] = ping[index+1];
}
else {
tempPing[threadIdx.y +1][33] = 0;
}
}
if(threadIdx.y ==31){
if(y<height-1 && x<width){
tempPing[33][threadIdx.x +1] = ping[index + width];
}
else{
tempPing[33][threadIdx.x +1] = 0;
}
}
if(threadIdx.x == 0 && threadIdx.y == 0){
if(x>0 && y >0 && x<width && y<height)tempPing[0][0] = ping[index -width -1];
else tempPing[0][0] = 0;
}
if(threadIdx.x == 31 && threadIdx.y == 0){
if(y>0 && x < width-1 && y <height)tempPing[0][33] = ping[index - width +1];
else tempPing[0][33] = 0;
}
if(threadIdx.x == 31 && threadIdx.y == 31){
if(y<height-1 && x<width-1)tempPing[33][33] = ping[index + width +1];
else tempPing[33][33] = 0;
}
if(threadIdx.x == 0 && threadIdx.y == 31){
if(x>0 && y<height-1 && x<width)tempPing[33][0] = ping[index + width -1];
else tempPing[33][0] = 0;
}
__syncthreads();
for(int i =0 ; i<iteration ;i++){
__syncthreads();
if(x<width && y<height){
if(tempPing[ty][tx] < lhd)pang[index] = 0;
else if(tempPing[ty][tx]>= hhd)pang[index]=1;
else {
if(tempPing[ty-1][tx-1]>=hhd || tempPing[ty-1][tx]>=hhd ||tempPing[ty-1][tx+1]>=hhd
|| tempPing[ty][tx-1]>=hhd || tempPing[ty][tx+1]>=hhd
|| tempPing[ty+1][tx-1]>=hhd || tempPing[ty+1][tx]>=hhd ||tempPing[ty+1][tx+1]>=hhd){
pang[index] = 1;
tempPing[ty][tx] = hhd;
}else{
pang[index] = 0;
tempPing[ty][tx]= hhd;
}
}
}//x, y
}
}
__global__ void trans2Bmp(float *ping , uchar3* imagedata , int width ,int height , int rev){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x< width && y < height){
// imagedata[index].x = imagedata[index].y = imagedata[index].z = (ping[index]<25?0:255);
if(rev)imagedata[index].x = imagedata[index].y = imagedata[index].z = ping[index]*255;
else imagedata[index].x = imagedata[index].y = imagedata[index].z =(1- ping[index])*255;
// imagedata[index].x = imagedata[index].y = imagedata[index].z = ping[index];
}
}
void cookargument(int argc , char* argv[] , float *gaussSigma , float *hsigma , float *lsigma ,int *iteration , char** srcName , char** dstName , int *opti , int *rev){
//init
*gaussSigma = 1.2; //-gs
*hsigma = 0.2; //-hs
*lsigma = 0.1; //-ls
*iteration = 10; //-it
*opti =1; //-opt
*rev = 1; //-rev
//-src
//-dst
//no argument
if(argc ==1)return ;
int status;
for(int i=1 ; i<argc;i++){
//set gaussSigma
if(strcmp("-gs",argv[i])==0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%f" , gaussSigma);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}
else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set hsigma
if(strcmp("-hs" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%f" , hsigma);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set lsigma
if(strcmp("-ls" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%f" , lsigma);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set iteration
if(strcmp("-it" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%d" , iteration);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set src file name
if(strcmp("-src" , argv[i]) == 0){
if(i<(argc-1)){
i++;
*srcName = argv[i];
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set dst file name
if(strcmp("-dst" , argv[i]) == 0){
if(i<(argc-1)){
i++;
*dstName = argv[i];
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set opotimized or not
if(strcmp("-opt" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%d" , opti);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set edge color 1 white 0 black
if(strcmp("-rev" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%d" , rev);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//print help
if(strcmp("-help" , argv[i]) == 0 ){
printf("-src srcname : set src image file, must be 24 bit bmp file\n");
printf("-dst dstname : set dst image file, must be 24 bit bmp file\n");
printf("-gs gaussSigma: set gaussSigma\n");
printf("-ls lowThreshold : set lowthreshold in canny\n");
printf("-hs highThreshold: set highthreshold in canny\n");
printf("-it iteration : set iteration number of tracing edge\n");
printf("-opt 1/0 : set ioptimimze tracing edge or not\n");
printf("-rev 1/0 : set edge color black(0)/white(1)\n");
printf("-help :show info like this\n");
exit(0);
}
}//for
//test print
// printf("gausssigma:%f\n", *gaussSigma);
// printf("lsigma:%f\n", *lsigma);
// printf("hsigma:%f\n", *hsigma);
// printf("iteration:%d\n", *iteration);
//check value
if(*iteration <1){
printf("*****invalid iteration*****\n");
exit(1);
}
if(*lsigma > *hsigma || *lsigma < 0 || *hsigma >1){
printf("*****invalid l/hsigma value*****\n");
exit(1);
}
if(*gaussSigma<=0){
printf("*****invalid gauss sigma value*****\n");
exit(1);
}
}
int main(int argc , char* argv[]){
clock_t start , end;
start = clock();
clock_t startCanny , endCanny;
uchar3 *imagedataSrc , *imagedataDst;
uchar3 *d_imagedataSrc ,*d_imagedataDst;
float *d_gauss;
float * d_ping, *d_pang;
float *d_dx , *d_dy;
float *d_max;
// float gauss[GauSize][GauSize];
int gaussSize = GauSize;
int width , height;
float gaussSigma =1.2;
float hsigma = 0.15;
float lsigma = 0.1;
int iteration = 10;
int optimized = 1;
int rev = 1;
// const char srcName[] = "../data/src3.bmp";
// const char dstName[] = "../data/out.bmp";
char *srcName = NULL ;
char *dstName = NULL ;
cookargument(argc , argv , &gaussSigma , &hsigma , &lsigma ,&iteration , &srcName , &dstName , &optimized,&rev);
if(srcName ==NULL)srcName = "../data/src3.bmp";
if(dstName ==NULL)dstName = "../data/out.bmp";
//load bmp format image file
loadBMPFile(&imagedataSrc , &width , &height , srcName );
// height /= 4;
hipError_t cudaStatus = hipMalloc((void**)&d_gauss , gaussSize*gaussSize*sizeof(float));
if(cudaStatus!=hipSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
// hipMalloc(&d_imagedataDst , width*height*sizeof(uchar3));
cudaStatus = hipMalloc(&d_ping , width*height*sizeof(float));
if(cudaStatus!=hipSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = hipMalloc(&d_pang , width*height*sizeof(float));
if(cudaStatus!=hipSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = hipMalloc(&d_dx , width*height*sizeof(float));
if(cudaStatus!=hipSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = hipMalloc(&d_dy , width*height*sizeof(float));
if(cudaStatus!=hipSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = hipMalloc(&d_imagedataSrc , width*height*sizeof(uchar3));
if(cudaStatus!=hipSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
// imagedataDst = (uchar3*)malloc( width*height*sizeof(uchar3));
cudaStatus = hipMemcpy(d_imagedataSrc , imagedataSrc , width*height*sizeof(uchar3) , hipMemcpyHostToDevice );
if(cudaStatus!=hipSuccess){
printf("*****fail to copy memory*****\n");
return 1;
}
dim3 numofBlock(32 , 32 , 1);
dim3 threadsPerBlock((width+31)/32 , (height+31)/32, 1);
int threadnum = numofBlock.x*numofBlock.y*numofBlock.z;
int blocknum = (width*height+threadnum-1)/threadnum;
hipMalloc(&d_max , blocknum*sizeof(float));
//main process in gpu
startCanny = clock();
//generate gaussian filter
hipLaunchKernelGGL(( genGauss), dim3(1) ,dim3(numofBlock) , 0, 0, d_gauss , gaussSize ,gaussSigma);
// hipDeviceSynchronize();
//calc image's grayscale value
hipLaunchKernelGGL(( rgb2grayscale), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_imagedataSrc , width , height , d_ping );
// hipDeviceSynchronize();
//use gaussian filter
hipLaunchKernelGGL(( gaussBlur), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_ping , d_pang , width , height , d_gauss , gaussSize);
// hipDeviceSynchronize();
//calc gradient of every pixel
hipLaunchKernelGGL(( calcGradient), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_pang ,d_ping , d_dx , d_dy , width , height);
//non max gradient restrain
hipLaunchKernelGGL(( nonMaxRestrain), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_ping , d_pang , d_dx , d_dy , width , height);
//find max gradient of the whole image
//local max
hipLaunchKernelGGL(( findMaxGrad), dim3(blocknum),dim3(threadnum) , threadnum*sizeof(float), 0, d_pang , width , height , d_max);
//global max
hipLaunchKernelGGL(( findMaxGrad), dim3(1), dim3(blocknum) , blocknum*sizeof(float), 0, d_max , blocknum , 1 , d_max);
// hipDeviceSynchronize();
// float max;
// hipMemcpy(&max , d_max , sizeof(float) , hipMemcpyDeviceToHost );
// printf("max is %f\n",max);
// printf("blocknum is %d\n" , blocknum);
//Tracing edges through the image and hysteresis thresholding
if(!optimized)hipLaunchKernelGGL(( edgeTrace), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_pang ,d_ping , d_max , width , height , iteration , hsigma , lsigma);
else hipLaunchKernelGGL(( edgeTraceOptimized), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_pang ,d_ping , d_max , width , height , iteration , hsigma , lsigma);
//translate bit value to bmp file format
hipLaunchKernelGGL(( trans2Bmp), dim3(threadsPerBlock), dim3(numofBlock), 0, 0, d_ping , d_imagedataSrc , width , height,rev);
//main process end
cudaStatus = hipDeviceSynchronize();
if(cudaStatus != hipSuccess){
printf("*****kernel function down*****\n");
}
endCanny = clock();
printf("------canny use %ld ms\n" , endCanny - startCanny);
cudaStatus = hipMemcpy(imagedataSrc , d_imagedataSrc , width*height*sizeof(uchar3) , hipMemcpyDeviceToHost );
if(cudaStatus!=hipSuccess){
printf("*****fail to copy memory from device*****\n");
return 1;
}
// for(int i=0 ; i<width*height/1024;i++){
// cudaStatus = hipMemcpy(imagedataSrc+i*1024 , d_imagedataSrc+i*1024 , 1024*sizeof(uchar3) , hipMemcpyDeviceToHost );
// if(cudaStatus!=hipSuccess){
// printf("*****fail to copy memory from device*****\n");
// return 1;
// }
// }
// if((width*height)%1024!=0){
// cudaStatus = hipMemcpy(imagedataSrc+i*1024 , d_imagedataSrc+i*1024 , ((width*height)%1024)*sizeof(uchar3) , hipMemcpyDeviceToHost );
// if(cudaStatus!=hipSuccess){
// printf("*****fail to copy memory from device*****\n");
// return 1;
// }
// }
printf("before save bmlp file\n");
printf("width:%d height:%d\n", width , height);
saveBMPFile(imagedataSrc ,width , height , dstName);
// hipFree(d_imagedataDst);
hipFree(d_imagedataSrc);
hipFree(d_gauss);
hipFree(d_ping);
hipFree(d_pang);
hipFree(d_dx);
hipFree(d_dy);
hipFree(d_max);
// free(imagedataDst);
free(imagedataSrc);
end = clock();
printf("------total use %ld ms\n" , end - start);
return 0;
}
| 62408c9922596eec277bddb0e03bfca9be48c1d6.cu | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
#pragma pack (1)
//pack structs to 1-byte alignment so the BMP headers match the on-disk layout
#define GauSize 5
typedef struct{
short type; //file type, must be "BM"
int size; //size of the whole bitmap file, in bytes
short reserved1; //reserved, all zeros
short reserved2; //reserved, all zeros
int offset; //offset of the bitmap pixel data from the start of the file, in bytes
}BMPHeader;
typedef struct
{
int size; //size of the BMP info header, in bytes
int width; //width in pixels
int height; //height in pixels
short planes; // normally 1
short bitsPerPixel; //bits used per pixel
unsigned compression; //compression type: 0 = uncompressed, 1 = BI_RLE8, 2 = BI_RLE4
unsigned imageSize; //size of the pixel data section, in bytes
int xPelsPerMeter; //horizontal resolution, pixels per meter, may be 0
int yPelsPerMeter; //same as above, vertical
int clrUsed; //number of colors actually used from the color table, 0 means all are used
int clrImportant; //number of colors important for display, 0 means all are important
}BMPInfoHeader;
// typedef struct {
// unsigned char x,y,z; //24-bit BMP pixel data stores channels in BGR order, and every row must be zero-padded to a multiple of 4 bytes
// }uchar3;
void loadBMPFile(uchar3 ** dst, int *width , int *height , const char *name){
BMPHeader hdr;
BMPInfoHeader infoHdr;
int x,y;
FILE *fd ;
printf("Loading %s ...\n", name);
if(sizeof(uchar3) !=3){
printf("***** uchaar3 is not 3 bytes *****\n");
return ;
}
if( !(fd = fopen(name, "rb"))){
printf("***** fail to open %s *****\n", name);
return ;
}
fread(&hdr , sizeof(hdr) , 1, fd);
if(hdr.type != 0x4d42){
printf("***** it is not a bmp file *****\n");
return ;
}
fread(&infoHdr , sizeof(infoHdr) , 1, fd);
if(infoHdr.bitsPerPixel !=24){
printf("***** invalid color depth (24 bits needed) *****\n");
printf("It is %hd\n" , infoHdr.bitsPerPixel);
printf("size of short : %d\n",sizeof(short));
printf("size of hdr is %d , infoHdr is %d\n" , sizeof(hdr) , sizeof(infoHdr));
return;
}
printf("size of short : %d\n",sizeof(short));
printf("size of hdr is %d , infoHdr is %d\n" , sizeof(hdr) , sizeof(infoHdr));
if(infoHdr.compression){
printf("***** cannot solve copressed image *****\n");
return ;
}
*width = infoHdr.width;
*height = infoHdr.height;
//malloc space for image data
*dst = (uchar3 *)malloc(*width * *height * sizeof(uchar3));
// cudaMallocManaged(dst , *width * *height * sizeof(uchar3));
printf("image width: %u\n", infoHdr.width);
printf("image height: %u\n", infoHdr.height);
fseek(fd, hdr.offset - sizeof(hdr) - sizeof(infoHdr) , SEEK_CUR);
for(y = 0 ; y < infoHdr.height ; y++){
for(x = 0 ; x < infoHdr.width ; x++){
(*dst)[ y * infoHdr.width + x ].z = fgetc(fd);
(*dst)[ y * infoHdr.width + x ].y = fgetc(fd);
(*dst)[ y * infoHdr.width + x ].x = fgetc(fd);
}
//pass filling bytes
for(x = 0 ; x < (4 - (3*infoHdr.width)%4)%4 ; x++ ){
fgetc(fd);
}
}
printf("image file loaded successful! \n");
fclose(fd);
}
void saveBMPFile(uchar3 *dst , int width , int height , const char *name){
printf("in save bmp file\n");
BMPHeader hdr;
BMPInfoHeader infoHdr;
hdr.type = 0x4d42;
hdr.reserved1 = 0;
hdr.reserved2 = 0;
hdr.offset = 54;
hdr.size = hdr.offset + 3 * height * (width + (4-(width*3)%4)%4 );
infoHdr.size = 40;
infoHdr.width = width;
infoHdr.height = height;
infoHdr.planes = 1;
infoHdr.bitsPerPixel = 24;
infoHdr.compression = 0;
infoHdr.imageSize = hdr.size - hdr.offset;
infoHdr.xPelsPerMeter = 0;
infoHdr.yPelsPerMeter = 0;
infoHdr.clrImportant = 0;
infoHdr.clrUsed = 0;
FILE *fd;
if(! (fd=fopen(name,"wb"))){
printf("***** fail to open dest file *****\n");
return;
}
fwrite(&hdr , sizeof(hdr) , 1 , fd);
fwrite(&infoHdr , sizeof(infoHdr) , 1 , fd);
int x,y;
for(y = 0; y < infoHdr.height ; y++){
for(x = 0 ; x < infoHdr.width ; x++){
fputc( dst[ y * infoHdr.width + x ].z , fd);
fputc( dst[ y * infoHdr.width + x ].y , fd);
fputc( dst[ y * infoHdr.width + x ].x , fd);
// printf("save one pixel\n");
}
//pass filling bytes
for(x = 0 ; x < (4 - (3*infoHdr.width)%4)%4 ; x++ ){
fputc(0 , fd);
}
}
printf("image file writed over\n");
fclose(fd);
}
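//genGauss: builds a gaussSize x gaussSize Gaussian filter in parallel (one thread per
//coefficient), accumulates the sum of all coefficients in shared memory and then
//normalizes so the filter weights add up to 1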
__global__ void genGauss(float *gauss , int size , float sig ){
__shared__ float sum;
//let a single thread initialize the shared accumulator and synchronize, so no thread
//can reset it after another thread has already added its contribution
if(threadIdx.x==0 && threadIdx.y==0) sum=0;
__syncthreads();
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int mid = size/2 ;
float sigma = sig;
if(x<size && y<size){
gauss[y*size + x] = __expf(-0.5*((x - mid)*(x - mid)+(y - mid)*(y - mid)) / (sigma*sigma)) / (2 * 3.1415926 * sigma *sigma);
atomicAdd(&sum , gauss[y*size + x]);
}
__syncthreads();
if(x<size && y<size){
gauss[y*size + x] = gauss[y*size + x] /sum;
}
}
__device__ float cannyRgb2grayscale(uchar3 pix){
return (pix.x*0.299 + pix.y*0.587 + pix.z*0.114);
}
__global__ void rgb2grayscale(uchar3 * pix , int width , int height , float * ping){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x < width && y < height){
ping[index] = pix[index].x*0.299 + pix[index].y*0.587 + pix[index].z*0.114;
}
}
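//gaussBlur: caches the Gaussian coefficients in shared memory and convolves the
//grayscale image (ping) into pang, skipping neighbors that fall outside the image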
__global__ void gaussBlur(float * ping, float * pang , int width , int height , float * gauss , int gaussSize){
__shared__ float gau[GauSize][GauSize];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
//load guass matrix
if(threadIdx.x<GauSize && threadIdx.y<GauSize){
gau[threadIdx.y][threadIdx.x] = gauss[ threadIdx.y * GauSize + threadIdx.x];
}
__syncthreads();
//blur
if(x<width && y<height ){
float blurValue = 0.0f;
int mid = GauSize/2 +1;
for(int i = mid-GauSize ; i <= GauSize-mid; i++){
for(int j = mid-GauSize ; j <= GauSize-mid; j++){
// blurValue += gau[i+mid-1][j+mid-1]*dst[(y+i)*width + x+j].x;
if((i+y)<height && (i+y)>=0 && (j+x)<width && (j+x)>=0 )
blurValue += gau[i+mid-1][j+mid-1] * ping[index + i*width +j];
}
}
// src[index].x = src[index].y = src[index].z = blurValue;
pang[index] = blurValue;
}
}
__global__ void calcGradient(float *pang,float *ping , float * dx , float *dy , int width , int height){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x<width-1 && y< height-1){
dx[index] = pang[index + 1] - pang[index];
dy[index] = pang[index + width] - pang[index];
//ping is gradient now
ping[index] = sqrtf(dx[index]*dx[index]+dy[index]*dy[index] );
}
if(x<width && y< height && (x==width-1 || y==height-1)){
ping[index] = 0;
}
}
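//nonMaxRestrain: non-maximum suppression -- each pixel's gradient magnitude is compared
//against the two values interpolated along the gradient direction from its eight
//neighbors; only local maxima are kept, everything else is zeroed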
__global__ void nonMaxRestrain(float *pang, float *ping , float *dx , float *dy , int width , int height){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x<width -2 && y<height -2 && x>0 &&y>0){
if(pang[index] <= 0.0000000001f && pang[index] >= -0.0000000001f){
ping[index] = 0.0f;
}
else{
float dxAbs = abs(dx[index]);
float dyAbs = abs(dy[index]);
float weight = 0;
float grad[4];
if(dyAbs > dxAbs){
weight = dxAbs/dyAbs;
grad[1] = pang[index - width];
grad[3] = pang[index + width];
if(dx[index]*dy[index]>0){
grad[0] = pang[index -width - 1];
grad[2] = pang[index +width + 1];
}
else{
grad[0] = pang[index -width + 1];
grad[2] = pang[index +width - 1];
}
}else{
weight = dyAbs / dxAbs;
grad[1] = pang[index - 1];
grad[3] = pang[index + 1];
if(dx[index]*dy[index]>0){
// grad[0] = pang[index +width - 1];
// grad[2] = pang[index -width + 1];
grad[0] = pang[index -width - 1];
grad[2] = pang[index +width + 1];
}
else{
// grad[0] = pang[index -width - 1];
// grad[2] = pang[index +width + 1];
grad[0] = pang[index -width + 1];
grad[2] = pang[index +width - 1];
}
}
//interpolate the two neighboring gradient magnitudes along the gradient direction
// float grad1 = weight * grad[0] + (1-weight) * grad[1];
// float grad2 = weight * grad[2] + (1-weight) * grad[3];
float grad1 = weight * grad[1] + (1-weight) * grad[0];
float grad2 = weight * grad[3] + (1-weight) * grad[2];
if(pang[index] > grad1 && pang[index] > grad2){
ping[index] = pang[index];
} else{
ping[index] = 0.0f;
}
}
}//x<width y<height
// else{
// ping[index] = 0;
// }
}
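//findMaxGrad: tree reduction in shared memory that writes one partial maximum per
//block; main() calls it twice -- once over the whole image, then with a single block
//over the per-block partials (so blocknum must not exceed the per-block thread limit)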
__global__ void findMaxGrad(float * ping , int width , int height , float * max){
extern __shared__ float temp[];
int tid = threadIdx.x;
int idx = tid + blockDim.x * blockIdx.x;
if(idx<width*height){
temp[tid] = ping[idx];
}else{
temp[tid] = 0;
}
int prevSize = blockDim.x;
for(int d = blockDim.x >>1 ; d>0; d >>= 1){
__syncthreads();
if(tid<d ){
temp[tid] = temp[tid]>temp[tid+d]?temp[tid]:temp[tid + d];
//for arbitrary block size
if(prevSize > d*2 && (tid+2*d)<prevSize ){
temp[tid] = temp[tid]>temp[tid+2*d]?temp[tid]:temp[tid + 2*d];
}
prevSize = d;
}
}
if(tid==0)max[blockIdx.x] = temp[0];
}
__global__ void edgeTrace(float *ping , float *pang , float *maxptr , int width , int height , int iteration , float hsigma , float lsigma){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
float max = *maxptr;
float lhd = max * lsigma;
float hhd = max * hsigma;
for(int i =0 ; i<iteration ;i++){
__syncthreads();
if(x<width -1 && y<height -1 && x>0 && y>0){
if(ping[index] < lhd)pang[index] = 0;
else if(ping[index]>= hhd)pang[index]=1;
else {
if(ping[index-width-1]>=hhd || ping[index-width]>=hhd ||ping[index-width+1]>=hhd
|| ping[index-1]>=hhd || ping[index+1]>=hhd
|| ping[index+width-1]>=hhd || ping[index+width]>=hhd ||ping[index+width+1]>=hhd){
pang[index] = 1;
ping[index] = hhd;
}else{
pang[index] = 0;
ping[index] = hhd;
}
}
}//x, y
}
}
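//edgeTraceOptimized: same hysteresis thresholding as edgeTrace, but each 32x32 block
//first stages a 34x34 tile (the block plus a one-pixel halo) of the gradient image in
//shared memory and iterates on that tile instead of on global memory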
__global__ void edgeTraceOptimized(float *ping , float *pang , float *maxptr , int width , int height , int iteration , float hsigma , float lsigma){
__shared__ float tempPing[34][34] ;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
float max = *maxptr;
float lhd = max * lsigma;
float hhd = max * hsigma;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
if(x<width && y < height)tempPing[threadIdx.y +1][threadIdx.x +1] = ping[index];
if(threadIdx.x ==0 ){
if(x>0 && x<width && y<height){
tempPing[threadIdx.y +1][0] = ping[index - 1];
}
else{
tempPing[threadIdx.y +1][0] = 0;
}
}
if(threadIdx.y ==0){
if(y>0 && x<width && y<height){
tempPing[0][threadIdx.x +1] = ping[index - width];
}
else {
tempPing[0][threadIdx.x +1] = 0;
}
}
if(threadIdx.x == 31){
if(x<width-1 && y<height){
tempPing[threadIdx.y +1][33] = ping[index+1];
}
else {
tempPing[threadIdx.y +1][33] = 0;
}
}
if(threadIdx.y ==31){
if(y<height-1 && x<width){
tempPing[33][threadIdx.x +1] = ping[index + width];
}
else{
tempPing[33][threadIdx.x +1] = 0;
}
}
if(threadIdx.x == 0 && threadIdx.y == 0){
if(x>0 && y >0 && x<width && y<height)tempPing[0][0] = ping[index -width -1];
else tempPing[0][0] = 0;
}
if(threadIdx.x == 31 && threadIdx.y == 0){
if(y>0 && x < width-1 && y <height)tempPing[0][33] = ping[index - width +1];
else tempPing[0][33] = 0;
}
if(threadIdx.x == 31 && threadIdx.y == 31){
if(y<height-1 && x<width-1)tempPing[33][33] = ping[index + width +1];
else tempPing[33][33] = 0;
}
if(threadIdx.x == 0 && threadIdx.y == 31){
if(x>0 && y<height-1 && x<width)tempPing[33][0] = ping[index + width -1];
else tempPing[33][0] = 0;
}
__syncthreads();
for(int i =0 ; i<iteration ;i++){
__syncthreads();
if(x<width && y<height){
if(tempPing[ty][tx] < lhd)pang[index] = 0;
else if(tempPing[ty][tx]>= hhd)pang[index]=1;
else {
if(tempPing[ty-1][tx-1]>=hhd || tempPing[ty-1][tx]>=hhd ||tempPing[ty-1][tx+1]>=hhd
|| tempPing[ty][tx-1]>=hhd || tempPing[ty][tx+1]>=hhd
|| tempPing[ty+1][tx-1]>=hhd || tempPing[ty+1][tx]>=hhd ||tempPing[ty+1][tx+1]>=hhd){
pang[index] = 1;
tempPing[ty][tx] = hhd;
}else{
pang[index] = 0;
tempPing[ty][tx]= hhd;
}
}
}//x, y
}
}
__global__ void trans2Bmp(float *ping , uchar3* imagedata , int width ,int height , int rev){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index = y * width + x;
if(x< width && y < height){
// imagedata[index].x = imagedata[index].y = imagedata[index].z = (ping[index]<25?0:255);
if(rev)imagedata[index].x = imagedata[index].y = imagedata[index].z = ping[index]*255;
else imagedata[index].x = imagedata[index].y = imagedata[index].z =(1- ping[index])*255;
// imagedata[index].x = imagedata[index].y = imagedata[index].z = ping[index];
}
}
void cookargument(int argc , char* argv[] , float *gaussSigma , float *hsigma , float *lsigma ,int *iteration , char** srcName , char** dstName , int *opti , int *rev){
//init
*gaussSigma = 1.2; //-gs
*hsigma = 0.2; //-hs
*lsigma = 0.1; //-ls
*iteration = 10; //-it
*opti =1; //-opt
*rev = 1; //-rev
//-src
//-dst
//no argument
if(argc ==1)return ;
int status;
for(int i=1 ; i<argc;i++){
//set gaussSigma
if(strcmp("-gs",argv[i])==0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%f" , gaussSigma);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}
else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set hsigma
if(strcmp("-hs" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%f" , hsigma);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set lsigma
if(strcmp("-ls" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%f" , lsigma);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set iteration
if(strcmp("-it" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%d" , iteration);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set src file name
if(strcmp("-src" , argv[i]) == 0){
if(i<(argc-1)){
i++;
*srcName = argv[i];
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set dst file name
if(strcmp("-dst" , argv[i]) == 0){
if(i<(argc-1)){
i++;
*dstName = argv[i];
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set opotimized or not
if(strcmp("-opt" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%d" , opti);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//set edge color 1 white 0 black
if(strcmp("-rev" , argv[i]) == 0){
if(i<(argc-1)){
i++;
status = sscanf(argv[i] , "%d" , rev);
if(status == 0 || status == EOF){
printf("*****invalid argument format*****\n");
exit(1);
}
}else{
printf("*****invalid argument format*****\n");
exit(1);
}
}
//print help
if(strcmp("-help" , argv[i]) == 0 ){
printf("-src srcname : set src image file, must be 24 bit bmp file\n");
printf("-dst dstname : set dst image file, must be 24 bit bmp file\n");
printf("-gs gaussSigma: set gaussSigma\n");
printf("-ls lowThreshold : set lowthreshold in canny\n");
printf("-hs highThreshold: set highthreshold in canny\n");
printf("-it iteration : set iteration number of tracing edge\n");
printf("-opt 1/0 : set ioptimimze tracing edge or not\n");
printf("-rev 1/0 : set edge color black(0)/white(1)\n");
printf("-help :show info like this\n");
exit(0);
}
}//for
//test print
// printf("gausssigma:%f\n", *gaussSigma);
// printf("lsigma:%f\n", *lsigma);
// printf("hsigma:%f\n", *hsigma);
// printf("iteration:%d\n", *iteration);
//check value
if(*iteration <1){
printf("*****invalid iteration*****\n");
exit(1);
}
if(*lsigma > *hsigma || *lsigma < 0 || *hsigma >1){
printf("*****invalid l/hsigma value*****\n");
exit(1);
}
if(*gaussSigma<=0){
printf("*****invalid gauss sigma value*****\n");
exit(1);
}
}
int main(int argc , char* argv[]){
clock_t start , end;
start = clock();
clock_t startCanny , endCanny;
uchar3 *imagedataSrc , *imagedataDst;
uchar3 *d_imagedataSrc ,*d_imagedataDst;
float *d_gauss;
float * d_ping, *d_pang;
float *d_dx , *d_dy;
float *d_max;
// float gauss[GauSize][GauSize];
int gaussSize = GauSize;
int width , height;
float gaussSigma =1.2;
float hsigma = 0.15;
float lsigma = 0.1;
int iteration = 10;
int optimized = 1;
int rev = 1;
// const char srcName[] = "../data/src3.bmp";
// const char dstName[] = "../data/out.bmp";
char *srcName = NULL ;
char *dstName = NULL ;
cookargument(argc , argv , &gaussSigma , &hsigma , &lsigma ,&iteration , &srcName , &dstName , &optimized,&rev);
if(srcName ==NULL)srcName = "../data/src3.bmp";
if(dstName ==NULL)dstName = "../data/out.bmp";
//load bmp format image file
loadBMPFile(&imagedataSrc , &width , &height , srcName );
// height /= 4;
cudaError_t cudaStatus = cudaMalloc((void**)&d_gauss , gaussSize*gaussSize*sizeof(float));
if(cudaStatus!=cudaSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
// cudaMalloc(&d_imagedataDst , width*height*sizeof(uchar3));
cudaStatus = cudaMalloc(&d_ping , width*height*sizeof(float));
if(cudaStatus!=cudaSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = cudaMalloc(&d_pang , width*height*sizeof(float));
if(cudaStatus!=cudaSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = cudaMalloc(&d_dx , width*height*sizeof(float));
if(cudaStatus!=cudaSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = cudaMalloc(&d_dy , width*height*sizeof(float));
if(cudaStatus!=cudaSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
cudaStatus = cudaMalloc(&d_imagedataSrc , width*height*sizeof(uchar3));
if(cudaStatus!=cudaSuccess){
printf("*****fail to malloc gpu memory*****\n");
return 1;
}
// imagedataDst = (uchar3*)malloc( width*height*sizeof(uchar3));
cudaStatus = cudaMemcpy(d_imagedataSrc , imagedataSrc , width*height*sizeof(uchar3) , cudaMemcpyHostToDevice );
if(cudaStatus!=cudaSuccess){
printf("*****fail to copy memory*****\n");
return 1;
}
dim3 numofBlock(32 , 32 , 1);
dim3 threadsPerBlock((width+31)/32 , (height+31)/32, 1);
int threadnum = numofBlock.x*numofBlock.y*numofBlock.z;
int blocknum = (width*height+threadnum-1)/threadnum;
cudaMalloc(&d_max , blocknum*sizeof(float));
//main process in gpu
startCanny = clock();
//generate gaussian filter
genGauss<<<1 ,numofBlock >>>(d_gauss , gaussSize ,gaussSigma);
// cudaDeviceSynchronize();
//calc image's grayscale value
rgb2grayscale<<< threadsPerBlock, numofBlock>>>(d_imagedataSrc , width , height , d_ping );
// cudaDeviceSynchronize();
//use gaussian filter
gaussBlur<<< threadsPerBlock, numofBlock>>>(d_ping , d_pang , width , height , d_gauss , gaussSize);
// cudaDeviceSynchronize();
//calc gradient of every pixel
calcGradient<<< threadsPerBlock, numofBlock>>>(d_pang ,d_ping , d_dx , d_dy , width , height);
//non max gradient restrain
nonMaxRestrain<<< threadsPerBlock, numofBlock>>>(d_ping , d_pang , d_dx , d_dy , width , height);
//find max gradient of the whole image
//local max
findMaxGrad<<<blocknum,threadnum , threadnum*sizeof(float)>>>(d_pang , width , height , d_max);
//global max
findMaxGrad<<<1, blocknum , blocknum*sizeof(float)>>>(d_max , blocknum , 1 , d_max);
// cudaDeviceSynchronize();
// float max;
// cudaMemcpy(&max , d_max , sizeof(float) , cudaMemcpyDeviceToHost );
// printf("max is %f\n",max);
// printf("blocknum is %d\n" , blocknum);
//Tracing edges through the image and hysteresis thresholding
if(!optimized)edgeTrace<<<threadsPerBlock, numofBlock>>>(d_pang ,d_ping , d_max , width , height , iteration , hsigma , lsigma);
else edgeTraceOptimized<<<threadsPerBlock, numofBlock>>>(d_pang ,d_ping , d_max , width , height , iteration , hsigma , lsigma);
//translate bit value to bmp file format
trans2Bmp<<< threadsPerBlock, numofBlock>>>(d_ping , d_imagedataSrc , width , height,rev);
//main process end
cudaStatus = cudaDeviceSynchronize();
if(cudaStatus != cudaSuccess){
printf("*****kernel function down*****\n");
}
endCanny = clock();
printf("------canny use %ld ms\n" , endCanny - startCanny);
cudaStatus = cudaMemcpy(imagedataSrc , d_imagedataSrc , width*height*sizeof(uchar3) , cudaMemcpyDeviceToHost );
if(cudaStatus!=cudaSuccess){
printf("*****fail to copy memory from device*****\n");
return 1;
}
// for(int i=0 ; i<width*height/1024;i++){
// cudaStatus = cudaMemcpy(imagedataSrc+i*1024 , d_imagedataSrc+i*1024 , 1024*sizeof(uchar3) , cudaMemcpyDeviceToHost );
// if(cudaStatus!=cudaSuccess){
// printf("*****fail to copy memory from device*****\n");
// return 1;
// }
// }
// if((width*height)%1024!=0){
// cudaStatus = cudaMemcpy(imagedataSrc+i*1024 , d_imagedataSrc+i*1024 , ((width*height)%1024)*sizeof(uchar3) , cudaMemcpyDeviceToHost );
// if(cudaStatus!=cudaSuccess){
// printf("*****fail to copy memory from device*****\n");
// return 1;
// }
// }
printf("before save bmlp file\n");
printf("width:%d height:%d\n", width , height);
saveBMPFile(imagedataSrc ,width , height , dstName);
// cudaFree(d_imagedataDst);
cudaFree(d_imagedataSrc);
cudaFree(d_gauss);
cudaFree(d_ping);
cudaFree(d_pang);
cudaFree(d_dx);
cudaFree(d_dy);
cudaFree(d_max);
// free(imagedataDst);
free(imagedataSrc);
end = clock();
printf("------total use %ld ms\n" , end - start);
return 0;
}
|
c71cdc40541d92a20b2e9e072f0bdd7485491973.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//scan.cu
//#include "kernel.hip"
#include "comm.h"
#include "wtime.h"
#include <stdio.h>
#include "iostream"
#define max_thd 256
#define max_block 256
graph * mygraph;
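//block_binary_kernel: each group of max_thd threads processes one workload edge (A,B);
//a strided sample of the longer adjacency list is cached in shared memory, and every
//element of the shorter list is located with a two-phase binary search (coarse search
//over the cached samples, then a fine search inside the matching sub-range); each match
//is a triangle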
__global__ void block_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
// vertex_t A = head[tid];
// vertex_t B = adj[tid];
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
//printf("find A %d B %d C %d\n",A,B,X);
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_PER_PART * gridDim.x*blockDim.x/256;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]+=val;
count[blockIdx.x]=val;
// if(val!=0)
// printf("+ %d\n",count[blockIdx.x]);
}
}
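//vertex_binary_kernel: vertex-centric variant -- each warp takes one vertex A, caches a
//32-element sample of A's adjacency list in shared memory, and for every neighbor B
//looks up entries of B's adjacency list in A's list with the same two-phase
//(cached sample, then sub-range) binary search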
__global__ void vertex_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
// Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t mycount=0;
__shared__ index_t local[max_thd];
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = tid;
vertex_t* a = &(adj_list[begin[A]]);
//initial cache
index_t m = begin[A+1]-begin[A];//degree[A];
local[p*32+i]=a[i*m/32];
__syncthreads();
for(index_t i=0; i<m;i++){
vertex_t B = adj_list[begin[A]+i];
index_t n = begin[B+1]-begin[B];//degree[B];
vertex_t* b = &(adj_list[begin[B]]);
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
}
tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
//----------------------------------------------------
}
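//warp_binary_kernel: like block_binary_kernel but at warp granularity -- each warp
//handles one workload edge, caches 32 samples of the longer list, and its 32 lanes
//cooperatively search the shorter list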
__global__ void warp_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
//if(i==0) printf("A %d B %d\n");
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
tid += GPU_NUM* blockDim.x*gridDim.x/32;
// tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
}
__global__ void init_count(index_t* count)
{
int tid = threadIdx.x;
count[tid] = 0;
}
__global__ void reduce_kernel(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
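//initDevice: selects the GPU, uploads the partition's CSR data (adjacency list and
//row-begin offsets), and allocates/zeroes the per-block triangle counters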
void graph::initDevice(int GPU_id,int Part_id){
//cuda memory copy of partAdj and partBegin
hipSetDevice(GPU_id);
int P=Part_id;
H_ERR(hipDeviceSynchronize() );
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
index_t EdgeCount = partEdgeCount[P];
vertex_t* Adj = partAdj[P];
index_t* Begin = partBegin[P];
H_ERR(hipMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) );
H_ERR(hipMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) );
gdata[GPU_id].adj = dev_adj;
gdata[GPU_id].begin = dev_begin;
gdata[GPU_id].count = dev_count;
hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count);
}
void graph::DeviceCompute(int GPU_id){
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( vertex_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0,
dev_adj,
dev_begin,
0,
vert_count,
dev_count
);
}
void graph::gpuReduce(int GPU_id){
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
Edge** buffer =gdata[GPU_id].EdgeBuffer;
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( reduce_kernel) , dim3(1),dim3(max_thd), 0, 0, dev_count);
H_ERR(hipMemcpy(&count[GPU_id], dev_count, sizeof(index_t), hipMemcpyDeviceToHost));
// thd_count += count[i];
// count[i] = thd_count;
H_ERR(hipFree(dev_adj) );
H_ERR(hipFree(dev_begin) );
H_ERR(hipFree(dev_count) );
// cout<<"GPU "<<GPU_id<<" finished"<<endl;
}
void graph::gpuProc(int GPU_id){
double t0 = wtime();
index_t total_count=0;
// for(int P=0; P<PART_NUM; P++){
// int P = GPU_id/4;
int P=0;
// if(PART_NUM > 1) int P = GPU_id%PART_NUM;
initDevice(GPU_id,P);
// while(1){
// index_t i=__sync_fetch_and_add(&chunk_proc[P],1);
// if(i>=(upperEdgeCount-1)/BufferSize+1) break;
// cpuCompute(P,i);
// }
// for(index_t i=GPU_id; i<(upperEdgeCount-1)/BufferSize+1; i+=8 ){
// for(index_t i=0; i<(upperEdgeCount-1)/BufferSize+1; i++ ){
// if(i%8<6)
// DeviceCompute(GPU_id,i);
DeviceCompute(GPU_id);
// }
gpuReduce(GPU_id);
// total_count += count[GPU_id];
// }
// count[GPU_id] = total_count; //count[GPU_id] already holds the result written by gpuReduce; assigning the unused total_count here would zero it out
double t1 = wtime();
cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl;
}
| c71cdc40541d92a20b2e9e072f0bdd7485491973.cu | //scan.cu
//#include "kernel.cu"
#include "comm.h"
#include "wtime.h"
#include <stdio.h>
#include "iostream"
#define max_thd 256
#define max_block 256
graph * mygraph;
__global__ void block_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
// vertex_t A = head[tid];
// vertex_t B = adj[tid];
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
//printf("find A %d B %d C %d\n",A,B,X);
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_PER_PART * gridDim.x*blockDim.x/256;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]+=val;
count[blockIdx.x]=val;
// if(val!=0)
// printf("+ %d\n",count[blockIdx.x]);
}
}
__global__ void vertex_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
// Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t mycount=0;
__shared__ index_t local[max_thd];
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = tid;
vertex_t* a = &(adj_list[begin[A]]);
//initial cache
index_t m = begin[A+1]-begin[A];//degree[A];
local[p*32+i]=a[i*m/32];
__syncthreads();
for(index_t i=0; i<m;i++){
vertex_t B = adj_list[begin[A]+i];
index_t n = begin[B+1]-begin[B];//degree[B];
vertex_t* b = &(adj_list[begin[B]]);
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
}
tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
//----------------------------------------------------
}
__global__ void warp_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
//if(i==0) printf("A %d B %d\n");
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
tid += GPU_NUM* blockDim.x*gridDim.x/32;
// tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
}
__global__ void init_count(index_t* count)
{
int tid = threadIdx.x;
count[tid] = 0;
}
__global__ void reduce_kernel(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void graph::initDevice(int GPU_id,int Part_id){
//cuda memory copy of partAdj and partBegin
cudaSetDevice(GPU_id);
int P=Part_id;
H_ERR(cudaDeviceSynchronize() );
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
index_t EdgeCount = partEdgeCount[P];
vertex_t* Adj = partAdj[P];
index_t* Begin = partBegin[P];
H_ERR(cudaMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) );
H_ERR(cudaMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) );
gdata[GPU_id].adj = dev_adj;
gdata[GPU_id].begin = dev_begin;
gdata[GPU_id].count = dev_count;
init_count <<<1,max_thd>>>(dev_count);
}
void graph::DeviceCompute(int GPU_id){
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
H_ERR(cudaDeviceSynchronize() );
vertex_binary_kernel<<<max_block,max_thd>>>
(
dev_adj,
dev_begin,
0,
vert_count,
dev_count
);
}
void graph::gpuReduce(int GPU_id){
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
Edge** buffer =gdata[GPU_id].EdgeBuffer;
H_ERR(cudaDeviceSynchronize() );
reduce_kernel <<<1,max_thd>>>(dev_count);
H_ERR(cudaMemcpy(&count[GPU_id], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost));
// thd_count += count[i];
// count[i] = thd_count;
H_ERR(cudaFree(dev_adj) );
H_ERR(cudaFree(dev_begin) );
H_ERR(cudaFree(dev_count) );
// cout<<"GPU "<<GPU_id<<" finished"<<endl;
}
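//gpuProc: per-GPU driver -- uploads partition 0, runs the vertex-centric counting
//kernel, reduces the per-block counters into count[GPU_id] and reports the elapsed time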
void graph::gpuProc(int GPU_id){
double t0 = wtime();
index_t total_count=0;
// for(int P=0; P<PART_NUM; P++){
// int P = GPU_id/4;
int P=0;
// if(PART_NUM > 1) int P = GPU_id%PART_NUM;
initDevice(GPU_id,P);
// while(1){
// index_t i=__sync_fetch_and_add(&chunk_proc[P],1);
// if(i>=(upperEdgeCount-1)/BufferSize+1) break;
// cpuCompute(P,i);
// }
// for(index_t i=GPU_id; i<(upperEdgeCount-1)/BufferSize+1; i+=8 ){
// for(index_t i=0; i<(upperEdgeCount-1)/BufferSize+1; i++ ){
// if(i%8<6)
// DeviceCompute(GPU_id,i);
DeviceCompute(GPU_id);
// }
gpuReduce(GPU_id);
// total_count += count[GPU_id];
// }
// count[GPU_id] = total_count; //count[GPU_id] already holds the result written by gpuReduce; assigning the unused total_count here would zero it out
double t1 = wtime();
cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl;
}
|
32c99065faee23521812653838173daa6077a281.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "convolutionSeparable31.hpp"
#include "defines.hpp"
#define ROW_BLOCK_DIM_X31 32
#define ROW_BLOCK_DIM_Y31 16
__constant__ float c_Kernel31[256];
void setConvolutionKernel31(float* h_Kernel, int k_length)
{
hipMemcpyToSymbol(c_Kernel31, h_Kernel, k_length * sizeof(float));
}
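//row pass of the separable convolution: each block stages ROW_RESULT_STEP output tiles
//plus ROW_HALO_STEP halo tiles on each side in shared memory, then convolves with the
//filter held in constant memory (c_Kernel31)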
__global__ void rowConvolutionFilter31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
){
__shared__ float sData[ROW_BLOCK_DIM_Y31][(ROW_RESULT_STEP + 2*ROW_HALO_STEP) * ROW_BLOCK_DIM_X31];
//offset to left halo edge
const int baseX = (blockIdx.x * ROW_RESULT_STEP) * ROW_BLOCK_DIM_X31 - ROW_HALO_STEP * ROW_BLOCK_DIM_X31 + threadIdx.x;
const int baseY = blockIdx.y * ROW_BLOCK_DIM_Y31 + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//load main data
#pragma unroll
for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP; i++) {
sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31] = (baseX + i * ROW_BLOCK_DIM_X31) < imageW ? d_Src[i*ROW_BLOCK_DIM_X31] : 0;
}
//load left halo
#pragma unroll
for (int i = 0; i < ROW_HALO_STEP; i++) {
sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31] = (baseX + i * ROW_BLOCK_DIM_X31) >= 0 ? d_Src[i*ROW_BLOCK_DIM_X31] : 0;
}
//load right halo
#pragma unroll
for (int i = ROW_HALO_STEP + ROW_RESULT_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP + ROW_HALO_STEP; i++) {
sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31] = (baseX + i * ROW_BLOCK_DIM_X31) < imageW ? d_Src[i * ROW_BLOCK_DIM_X31] : 0;
}
__syncthreads();
if (baseY >= imageH) {
return;
}
//convolve
#pragma unroll
for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP+ROW_RESULT_STEP; i++){
if(baseX + i * ROW_BLOCK_DIM_X31 < imageW){
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS31; j <= KERNEL_RADIUS31; j++) {
sum += c_Kernel31[KERNEL_RADIUS31 - j] * sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31 + j];
}
d_Dst[i*ROW_BLOCK_DIM_X31] = sum;
}
}
}
#define COL_BLOCK_DIM_X31 16
#define COL_BLOCK_DIM_Y31 32
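//column pass: same idea with the tile transposed; the shared array is padded by one
//element per row to avoid shared-memory bank conflicts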
__global__ void colConvolutionFilter31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float sData[COL_BLOCK_DIM_X31][(COL_RESULT_STEP + 2 * COL_HALO_STEP) * COL_BLOCK_DIM_Y31 + 1]; //+1 to avoid shared mem bank conflicts
const int baseX = blockIdx.x * COL_BLOCK_DIM_X31 + threadIdx.x;
const int baseY = blockIdx.y * COL_BLOCK_DIM_Y31 * COL_RESULT_STEP - COL_HALO_STEP * COL_BLOCK_DIM_Y31 + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//load main data
#pragma unroll
for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) {
sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31] = (baseY + i * COL_BLOCK_DIM_Y31) < imageH ? d_Src[i * COL_BLOCK_DIM_Y31 * pitch] : 0;
}
//load top halo
#pragma unroll
for (int i = 0; i < COL_HALO_STEP; i ++) {
sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31] = (baseY + i * COL_BLOCK_DIM_Y31) >= 0 ? d_Src[i * COL_BLOCK_DIM_Y31 * pitch] : 0;
}
//load bottom halo
#pragma unroll
for (int i = COL_HALO_STEP + COL_RESULT_STEP; i < COL_HALO_STEP + COL_RESULT_STEP + COL_HALO_STEP; i++) {
sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31] = (baseY + i * COL_BLOCK_DIM_Y31) < imageH ? d_Src[i * COL_BLOCK_DIM_Y31 * pitch] : 0;
}
__syncthreads();
if (baseX >= imageW) {
return;
}
//convolve
#pragma unroll
for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) {
if ((baseY + i * COL_BLOCK_DIM_Y31) < imageH) {
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS31; j <= KERNEL_RADIUS31; j++) {
sum += c_Kernel31[KERNEL_RADIUS31 - j] * sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31 + j];
}
d_Dst[i * COL_BLOCK_DIM_Y31 * pitch] = sum;
}
}
}
void rowConvolve31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
){
dim3 blocks((imageW + (ROW_RESULT_STEP * ROW_BLOCK_DIM_X31) - 1) / (ROW_RESULT_STEP * ROW_BLOCK_DIM_X31), (imageH + ROW_BLOCK_DIM_Y31 - 1) / ROW_BLOCK_DIM_Y31);
dim3 threads(ROW_BLOCK_DIM_X31, ROW_BLOCK_DIM_Y31);
hipLaunchKernelGGL(( rowConvolutionFilter31), dim3(blocks),dim3(threads), 0, 0, d_Dst,d_Src,imageW,imageH,pitch);
}
void colConvolve31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
){
dim3 blocks((imageW + COL_BLOCK_DIM_X31 - 1) / COL_BLOCK_DIM_X31, (imageH + COL_BLOCK_DIM_Y31 * COL_RESULT_STEP - 1) / (COL_BLOCK_DIM_Y31 * COL_RESULT_STEP));
dim3 threads(COL_BLOCK_DIM_X31, COL_BLOCK_DIM_Y31);
hipLaunchKernelGGL(( colConvolutionFilter31), dim3(blocks),dim3(threads), 0, 0, d_Dst,d_Src,imageW,imageH,pitch);
} | 32c99065faee23521812653838173daa6077a281.cu | #include "convolutionSeparable31.hpp"
#include "defines.hpp"
#define ROW_BLOCK_DIM_X31 32
#define ROW_BLOCK_DIM_Y31 16
__constant__ float c_Kernel31[256];
void setConvolutionKernel31(float* h_Kernel, int k_length)
{
cudaMemcpyToSymbol(c_Kernel31, h_Kernel, k_length * sizeof(float));
}
__global__ void rowConvolutionFilter31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
){
__shared__ float sData[ROW_BLOCK_DIM_Y31][(ROW_RESULT_STEP + 2*ROW_HALO_STEP) * ROW_BLOCK_DIM_X31];
//offset to left halo edge
const int baseX = (blockIdx.x * ROW_RESULT_STEP) * ROW_BLOCK_DIM_X31 - ROW_HALO_STEP * ROW_BLOCK_DIM_X31 + threadIdx.x;
const int baseY = blockIdx.y * ROW_BLOCK_DIM_Y31 + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//load main data
#pragma unroll
for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP; i++) {
sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31] = (baseX + i * ROW_BLOCK_DIM_X31) < imageW ? d_Src[i*ROW_BLOCK_DIM_X31] : 0;
}
//load left halo
#pragma unroll
for (int i = 0; i < ROW_HALO_STEP; i++) {
sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31] = (baseX + i * ROW_BLOCK_DIM_X31) >= 0 ? d_Src[i*ROW_BLOCK_DIM_X31] : 0;
}
//load right halo
#pragma unroll
for (int i = ROW_HALO_STEP + ROW_RESULT_STEP; i < ROW_HALO_STEP + ROW_RESULT_STEP + ROW_HALO_STEP; i++) {
sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31] = (baseX + i * ROW_BLOCK_DIM_X31) < imageW ? d_Src[i * ROW_BLOCK_DIM_X31] : 0;
}
__syncthreads();
if (baseY >= imageH) {
return;
}
//convolve
#pragma unroll
for (int i = ROW_HALO_STEP; i < ROW_HALO_STEP+ROW_RESULT_STEP; i++){
if(baseX + i * ROW_BLOCK_DIM_X31 < imageW){
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS31; j <= KERNEL_RADIUS31; j++) {
sum += c_Kernel31[KERNEL_RADIUS31 - j] * sData[threadIdx.y][threadIdx.x + i * ROW_BLOCK_DIM_X31 + j];
}
d_Dst[i*ROW_BLOCK_DIM_X31] = sum;
}
}
}
#define COL_BLOCK_DIM_X31 16
#define COL_BLOCK_DIM_Y31 32
__global__ void colConvolutionFilter31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float sData[COL_BLOCK_DIM_X31][(COL_RESULT_STEP + 2 * COL_HALO_STEP) * COL_BLOCK_DIM_Y31 + 1]; //+1 to avoid shared mem bank conflicts
const int baseX = blockIdx.x * COL_BLOCK_DIM_X31 + threadIdx.x;
const int baseY = blockIdx.y * COL_BLOCK_DIM_Y31 * COL_RESULT_STEP - COL_HALO_STEP * COL_BLOCK_DIM_Y31 + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//load main data
#pragma unroll
for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) {
sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31] = (baseY + i * COL_BLOCK_DIM_Y31) < imageH ? d_Src[i * COL_BLOCK_DIM_Y31 * pitch] : 0;
}
//load top halo
#pragma unroll
for (int i = 0; i < COL_HALO_STEP; i ++) {
sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31] = (baseY + i * COL_BLOCK_DIM_Y31) >= 0 ? d_Src[i * COL_BLOCK_DIM_Y31 * pitch] : 0;
}
//load bottom halo
#pragma unroll
for (int i = COL_HALO_STEP + COL_RESULT_STEP; i < COL_HALO_STEP + COL_RESULT_STEP + COL_HALO_STEP; i++) {
sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31] = (baseY + i * COL_BLOCK_DIM_Y31) < imageH ? d_Src[i * COL_BLOCK_DIM_Y31 * pitch] : 0;
}
__syncthreads();
if (baseX >= imageW) {
return;
}
//convolve
#pragma unroll
for (int i = COL_HALO_STEP; i < COL_HALO_STEP + COL_RESULT_STEP; i++) {
if ((baseY + i * COL_BLOCK_DIM_Y31) < imageH) {
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS31; j <= KERNEL_RADIUS31; j++) {
sum += c_Kernel31[KERNEL_RADIUS31 - j] * sData[threadIdx.x][threadIdx.y + i * COL_BLOCK_DIM_Y31 + j];
}
d_Dst[i * COL_BLOCK_DIM_Y31 * pitch] = sum;
}
}
}
void rowConvolve31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
){
dim3 blocks((imageW + (ROW_RESULT_STEP * ROW_BLOCK_DIM_X31) - 1) / (ROW_RESULT_STEP * ROW_BLOCK_DIM_X31), (imageH + ROW_BLOCK_DIM_Y31 - 1) / ROW_BLOCK_DIM_Y31);
dim3 threads(ROW_BLOCK_DIM_X31, ROW_BLOCK_DIM_Y31);
rowConvolutionFilter31<<<blocks,threads>>>(d_Dst,d_Src,imageW,imageH,pitch);
}
void colConvolve31(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
){
dim3 blocks((imageW + COL_BLOCK_DIM_X31 - 1) / COL_BLOCK_DIM_X31, (imageH + COL_BLOCK_DIM_Y31 * COL_RESULT_STEP - 1) / (COL_BLOCK_DIM_Y31 * COL_RESULT_STEP));
dim3 threads(COL_BLOCK_DIM_X31, COL_BLOCK_DIM_Y31);
colConvolutionFilter31<<<blocks,threads>>>(d_Dst,d_Src,imageW,imageH,pitch);
} |
2e40a0cb57700dd4a93b26ae7b3713b8d0380313.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHReduceApplyUtils.cuh"
#include <assert.h>
#include <stdlib.h>
// Maximum size per grid dimension that we assume (compute capability >= 2.0)
#define MAX_GRID_SIZE 65535L
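// Returns true when both the element count and the largest element offset produced by
// the size/stride arithmetic below fit into 32-bit unsigned indices.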
bool THC_canUse32BitIndexMath(THCState* state, THCudaTensor* t) {
long elements = THCudaTensor_nElement(state, t);
if (elements >= UINT_MAX) {
return false;
}
long offset = 0;
long linearId = elements - 1;
for (int i = THCudaTensor_nDimension(state, t) - 1; i >= 0; --i) {
long curDimIndex = linearId % THCudaTensor_size(state, t, i);
long curDimOffset = curDimIndex * THCudaTensor_stride(state, t, i);
offset += curDimOffset;
linearId /= THCudaTensor_size(state, t, i);
}
if (offset >= UINT_MAX) {
return false;
}
return true;
}
bool THC_getGridFromTiles(long gridTiles, dim3& grid) {
if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) {
return false;
}
long gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
long gridY = 1;
long gridZ = 1;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, MAX_GRID_SIZE);
gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, MAX_GRID_SIZE);
gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
}
}
grid = dim3(gridX, gridY, gridZ);
return true;
}
namespace {
struct SizeAndStride {
long size;
long stride;
};
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
return aS->stride < bS->stride;
}
}
bool THC_overlappingIndices(THCState* state, THCudaTensor* t) {
// In this function, we don't care about permutations of the
// size/stride arrays (transpositions).
// We order the size/stride arrays by stride, skipping dimensions of
// size 1. Strides of dimensions of size 1 don't matter, since there
// is only one addressing point in them.
// In this reordered view, the tensor is contiguous if
// stride[dim] == size[dim + 1] * stride[dim + 1] for all `dim`.
// The tensor has holes if
// stride[dim] > size[dim + 1] * stride[dim + 1] for one or more
// `dim`.
// The tensor has overlaps if
// stride[dim] < size[dim + 1] * stride[dim + 1] for one or more
// `dim`, or the innermost stride is 0.
// Extract size/stride arrays; only consider size >1 dims.
SizeAndStride info[MAX_CUTORCH_DIMS];
int dims = THCudaTensor_nDimension(state, t);
int nonSize1Dims = 0;
for (int i = 0; i < dims; ++i) {
long size = THCudaTensor_size(state, t, i);
if (size > 1) {
info[nonSize1Dims].size = size;
info[nonSize1Dims].stride = THCudaTensor_stride(state, t, i);
++nonSize1Dims;
}
}
if (nonSize1Dims == 0) {
// no overlap
return false;
}
// Descending order by stride (innermost dimension in sorted view is at the end)
qsort(info, nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride);
// Base case: innermost dimension must have stride >= 1
if (info[nonSize1Dims - 1].stride < 1) {
return true;
}
// Subsequent dimensions, if any
for (int i = nonSize1Dims - 2; i >= 0; --i) {
if (info[i].stride < info[i + 1].size * info[i + 1].stride) {
// There are overlaps
return true;
}
}
// Tensor has holes or is contiguous
return false;
}
| 2e40a0cb57700dd4a93b26ae7b3713b8d0380313.cu | #include "THCReduceApplyUtils.cuh"
#include <assert.h>
#include <stdlib.h>
// Maximum size per grid dimension that we assume (compute capability >= 2.0)
#define MAX_GRID_SIZE 65535L
bool THC_canUse32BitIndexMath(THCState* state, THCudaTensor* t) {
long elements = THCudaTensor_nElement(state, t);
if (elements >= UINT_MAX) {
return false;
}
long offset = 0;
long linearId = elements - 1;
for (int i = THCudaTensor_nDimension(state, t) - 1; i >= 0; --i) {
long curDimIndex = linearId % THCudaTensor_size(state, t, i);
long curDimOffset = curDimIndex * THCudaTensor_stride(state, t, i);
offset += curDimOffset;
linearId /= THCudaTensor_size(state, t, i);
}
if (offset >= UINT_MAX) {
return false;
}
return true;
}
bool THC_getGridFromTiles(long gridTiles, dim3& grid) {
if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) {
return false;
}
long gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
long gridY = 1;
long gridZ = 1;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, MAX_GRID_SIZE);
gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, MAX_GRID_SIZE);
gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
}
}
grid = dim3(gridX, gridY, gridZ);
return true;
}
namespace {
struct SizeAndStride {
long size;
long stride;
};
int compareSizeAndStride(const void* a, const void* b) {
const SizeAndStride* aS = (const SizeAndStride*) a;
const SizeAndStride* bS = (const SizeAndStride*) b;
return aS->stride < bS->stride;
}
}
bool THC_overlappingIndices(THCState* state, THCudaTensor* t) {
// In this function, we don't care about permutations of the
// size/stride arrays (transpositions).
// We order the size/stride arrays by stride, skipping dimensions of
// size 1. Strides of dimensions of size 1 don't matter, since there
// is only one addressing point in them.
// In this reordered view, the tensor is contiguous if
// stride[dim] == size[dim + 1] * stride[dim + 1] for all `dim`.
// The tensor has holes if
// stride[dim] > size[dim + 1] * stride[dim + 1] for one or more
// `dim`.
// The tensor has overlaps if
// stride[dim] < size[dim + 1] * stride[dim + 1] for one or more
// `dim`, or the innermost stride is 0.
// Extract size/stride arrays; only consider size >1 dims.
SizeAndStride info[MAX_CUTORCH_DIMS];
int dims = THCudaTensor_nDimension(state, t);
int nonSize1Dims = 0;
for (int i = 0; i < dims; ++i) {
long size = THCudaTensor_size(state, t, i);
if (size > 1) {
info[nonSize1Dims].size = size;
info[nonSize1Dims].stride = THCudaTensor_stride(state, t, i);
++nonSize1Dims;
}
}
if (nonSize1Dims == 0) {
// no overlap
return false;
}
// Descending order by stride (innermost dimension in sorted view is at the end)
qsort(info, nonSize1Dims, sizeof(SizeAndStride), compareSizeAndStride);
// Base case: innermost dimension must have stride >= 1
if (info[nonSize1Dims - 1].stride < 1) {
return true;
}
// Subsequent dimensions, if any
for (int i = nonSize1Dims - 2; i >= 0; --i) {
if (info[i].stride < info[i + 1].size * info[i + 1].stride) {
// There are overlaps
return true;
}
}
// Tensor has holes or is contiguous
return false;
}
|
c6892b0e525c9a41bd69e975ea53eb29e81104d9.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| c6892b0e525c9a41bd69e975ea53eb29e81104d9.cu | // generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_unroll_width<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
f1ba4295fac3c730eb7022617fc3c8b3af696b24.hip | // !!! This is a file automatically generated by hipify!!!
#if ACCELERATE_MODE == ACCELERATE_MODE_CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
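// getLaunchConfiguration: asks the occupancy API for a good block size for the given
// kernel, then derives the grid size needed to cover n elements with one thread each.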
template <class T>
__host__ void getLaunchConfiguration(T t, int n, int *blocks, int *threads) {
hipOccupancyMaxPotentialBlockSize(blocks, threads, t, 0, n);
*blocks = (n + *threads - 1) / *threads;
}
__global__ void vec_lgamma(double *a, double *c, const unsigned int n) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = lgamma(a[idx]);
}
}
__host__ double *cu_lgammed(const int rows, const int cols, double *iData) {
auto N = rows * cols;
double *C_accelerate_data = nullptr;
hipMalloc((void **) &C_accelerate_data, rows * cols * sizeof(double));
int blocks, threads;
getLaunchConfiguration(vec_lgamma, N, &blocks, &threads);
hipLaunchKernelGGL(( vec_lgamma), dim3(blocks), dim3(threads), 0, 0, iData, C_accelerate_data, N);
hipDeviceSynchronize();
return C_accelerate_data;
}
__global__ void vec_add(double *a, double *b, double *c, const unsigned int n) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = a[idx] + b[idx];
}
}
__global__ void vec_sub(double *a, double *b, double *c, const unsigned int n) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = a[idx] - b[idx];
}
}
__host__ double *cu_add(const int rows, const int cols, double *m1, double *m2) {
auto N = rows * cols;
double *C_accelerate_data = nullptr;
hipMalloc((void **) &C_accelerate_data, rows * cols * sizeof(double));
int blocks, threads;
getLaunchConfiguration(vec_add, N, &blocks, &threads);
hipLaunchKernelGGL(( vec_add), dim3(blocks), dim3(threads), 0, 0, m1, m2, C_accelerate_data, N);
hipDeviceSynchronize();
return C_accelerate_data;
}
__host__ double *cu_sub(const int rows, const int cols, double *m1, double *m2) {
auto N = rows * cols;
double *C_accelerate_data = nullptr;
hipMalloc((void **) &C_accelerate_data, rows * cols * sizeof(double));
int blocks, threads;
getLaunchConfiguration(vec_sub, N, &blocks, &threads);
hipLaunchKernelGGL(( vec_sub), dim3(blocks), dim3(threads), 0, 0, m1, m2, C_accelerate_data, N);
hipDeviceSynchronize();
return C_accelerate_data;
}
#endif
| f1ba4295fac3c730eb7022617fc3c8b3af696b24.cu | #if ACCELERATE_MODE == ACCELERATE_MODE_CUDA
#include <cuda.h>
#include <cuda_runtime_api.h>
template <class T>
__host__ void getLaunchConfiguration(T t, int n, int *blocks, int *threads) {
cudaOccupancyMaxPotentialBlockSize(blocks, threads, t, 0, n);
*blocks = (n + *threads - 1) / *threads;
}
__global__ void vec_lgamma(double *a, double *c, const unsigned int n) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = lgamma(a[idx]);
}
}
__host__ double *cu_lgammed(const int rows, const int cols, double *iData) {
auto N = rows * cols;
double *C_accelerate_data = nullptr;
cudaMalloc((void **) &C_accelerate_data, rows * cols * sizeof(double));
int blocks, threads;
getLaunchConfiguration(vec_lgamma, N, &blocks, &threads);
vec_lgamma<<<blocks, threads>>>(iData, C_accelerate_data, N);
cudaDeviceSynchronize();
return C_accelerate_data;
}
__global__ void vec_add(double *a, double *b, double *c, const unsigned int n) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = a[idx] + b[idx];
}
}
__global__ void vec_sub(double *a, double *b, double *c, const unsigned int n) {
const long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
c[idx] = a[idx] - b[idx];
}
}
__host__ double *cu_add(const int rows, const int cols, double *m1, double *m2) {
auto N = rows * cols;
double *C_accelerate_data = nullptr;
cudaMalloc((void **) &C_accelerate_data, rows * cols * sizeof(double));
int blocks, threads;
getLaunchConfiguration(vec_add, N, &blocks, &threads);
vec_add<<<blocks, threads>>>(m1, m2, C_accelerate_data, N);
cudaDeviceSynchronize();
return C_accelerate_data;
}
__host__ double *cu_sub(const int rows, const int cols, double *m1, double *m2) {
auto N = rows * cols;
double *C_accelerate_data = nullptr;
cudaMalloc((void **) &C_accelerate_data, rows * cols * sizeof(double));
int blocks, threads;
getLaunchConfiguration(vec_sub, N, &blocks, &threads);
vec_sub<<<blocks, threads>>>(m1, m2, C_accelerate_data, N);
cudaDeviceSynchronize();
return C_accelerate_data;
}
#endif
|
b910b4ee4456119607db1526f137fe3b5639b391.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if _WIN32
#pragma warning(disable : 4267)
#endif
#include "./to_gray.h"
#include "../utils/cuda_helper.h"
__global__ void toGrayKernel(hipSurfaceObject_t image, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
uchar4 color;
surf2Dread(&color, image, x * 4, y);
unsigned char gray = 0.2989 * color.x + 0.5870 * color.y + 0.1140 * color.z;
color.x = gray;
color.y = gray;
color.z = gray;
surf2Dwrite(color, image, x * 4, y);
}
ToGray::ToGray(std::shared_ptr<CudaArrayProvider> imageProvider)
: imageProvider(imageProvider)
{
}
void ToGray::runKernel()
{
imageProvider->map();
auto resDesc = imageProvider->getResourceDesc();
hipCreateSurfaceObject(&image, &resDesc);
dim3 dimBlock(32, 32, 1);
dim3 dimGrid(divUp(imageProvider->getWidth(), dimBlock.x),
divUp(imageProvider->getHeight(), dimBlock.y), 1);
hipLaunchKernelGGL(( toGrayKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, image, imageProvider->getWidth(),
imageProvider->getHeight());
HANDLE_ERROR(hipDeviceSynchronize());
imageProvider->unmap();
}
| b910b4ee4456119607db1526f137fe3b5639b391.cu | #if _WIN32
#pragma warning(disable : 4267)
#endif
#include "./to_gray.h"
#include "../utils/cuda_helper.h"
__global__ void toGrayKernel(cudaSurfaceObject_t image, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
uchar4 color;
surf2Dread(&color, image, x * 4, y);
unsigned char gray = 0.2989 * color.x + 0.5870 * color.y + 0.1140 * color.z;
color.x = gray;
color.y = gray;
color.z = gray;
surf2Dwrite(color, image, x * 4, y);
}
ToGray::ToGray(std::shared_ptr<CudaArrayProvider> imageProvider)
: imageProvider(imageProvider)
{
}
void ToGray::runKernel()
{
imageProvider->map();
auto resDesc = imageProvider->getResourceDesc();
cudaCreateSurfaceObject(&image, &resDesc);
dim3 dimBlock(32, 32, 1);
dim3 dimGrid(divUp(imageProvider->getWidth(), dimBlock.x),
divUp(imageProvider->getHeight(), dimBlock.y), 1);
toGrayKernel<<<dimGrid, dimBlock>>>(image, imageProvider->getWidth(),
imageProvider->getHeight());
HANDLE_ERROR(cudaThreadSynchronize());
imageProvider->unmap();
}
|
7bc63290e28f85f43f0a8fbc1ad08f5ec2dd1aa7.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=1
#include <hip/hip_runtime.h>
__global__ void race (int* A, int* B)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = A[idx + 1];
A[idx] = temp;
temp = B[idx + 1];
B[idx] = temp;
} | 7bc63290e28f85f43f0a8fbc1ad08f5ec2dd1aa7.cu | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
__global__ void race (int* A, int* B)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = A[idx + 1];
A[idx] = temp;
temp = B[idx + 1];
B[idx] = temp;
} |
0769d5517a3e05e3852358056a13600132e21810.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <sys/stat.h>
#include "common.h"
#include "common_struct.h"
#include "io_utils.h"
#include "rmse.h"
using namespace std;
// Helper function to check if a file exists
bool exists (const std::string& name) {
struct stat buffer;
return (stat(name.c_str(), &buffer) == 0);
}
int main (int argc, const char* argv[]){
string infile = "";
string testfile = "None";
int version = 1;
if(argc < 2){
cout << argv[0] << " [-t <threads> -p <predictions/user> -o <output-tsv>] <input-tsv>" << endl;
return(0);
}else if(argc == 2){
infile = string(argv[1]);
}else{
for(int i = 0; i < argc; i++){
if(string(argv[i]) == "-i" && i < argc-1){
infile = string(argv[i+1]);
}
if(string(argv[i]) == "-y" && i < argc-1){
testfile = string(argv[i+1]);
}
if(string(argv[i]) == "-v" && i < argc-1){
version = stoi(argv[i+1]);
}
if(string(argv[i]) == "-h"){
cout << argv[0] << " [-t <threads> -p <predictions/user> -o <output-tsv>] <input-tsv>" << endl;
return(0);
}
}
}
if(!exists(infile)){
cout << infile << " doesn't exist!" << endl;
return(0);
}
cout << endl;
cout << "Input file : " << infile << endl;
cout << "Test file : " << testfile << endl;
cout << "Version : " << version << endl;
SGD sgd_model;
Mf_info mf_info;
vector<Node> test_set = read_testset_pretrained_model(&mf_info, testfile);
read_trained_model(&mf_info, &sgd_model, infile);
remove_elements(&mf_info, test_set, version ,testfile);
hipMalloc(&mf_info.d_test_COO, sizeof(Node) * mf_info.test_n);
hipMalloc(&sgd_model.d_p, sizeof(float) * mf_info.params.k * mf_info.max_user);
hipMalloc(&sgd_model.d_q, sizeof(float) * mf_info.params.k * mf_info.max_item);
hipMemcpy(mf_info.d_test_COO, mf_info.test_COO, sizeof(Node) * mf_info.test_n, hipMemcpyHostToDevice);
float* d_e_group;
unsigned int error_kernel_work_groups = ceil(mf_info.test_n/(float)512);
unsigned int group_error_size = error_kernel_work_groups;
unsigned int iter_num = ceil(mf_info.test_n / (float) (512 * error_kernel_work_groups));
unsigned int seg_size = 32;
hipMalloc(&d_e_group, sizeof(float) * group_error_size);
double rmse = gpu_test_rmse(&mf_info, &sgd_model, mf_info.d_test_COO, d_e_group, error_kernel_work_groups, iter_num, seg_size, group_error_size);
cout << "RMSE : " << rmse << endl;
} | 0769d5517a3e05e3852358056a13600132e21810.cu | #include <iostream>
#include <sys/stat.h>
#include "common.h"
#include "common_struct.h"
#include "io_utils.h"
#include "rmse.h"
using namespace std;
// Helper function to check if a file exists
bool exists (const std::string& name) {
struct stat buffer;
return (stat(name.c_str(), &buffer) == 0);
}
int main (int argc, const char* argv[]){
string infile = "";
string testfile = "None";
int version = 1;
if(argc < 2){
cout << argv[0] << " [-t <threads> -p <predictions/user> -o <output-tsv>] <input-tsv>" << endl;
return(0);
}else if(argc == 2){
infile = string(argv[1]);
}else{
for(int i = 0; i < argc; i++){
if(string(argv[i]) == "-i" && i < argc-1){
infile = string(argv[i+1]);
}
if(string(argv[i]) == "-y" && i < argc-1){
testfile = string(argv[i+1]);
}
if(string(argv[i]) == "-v" && i < argc-1){
version = stoi(argv[i+1]);
}
if(string(argv[i]) == "-h"){
cout << argv[0] << " [-t <threads> -p <predictions/user> -o <output-tsv>] <input-tsv>" << endl;
return(0);
}
}
}
if(!exists(infile)){
cout << infile << " doesn't exist!" << endl;
return(0);
}
cout << endl;
cout << "Input file : " << infile << endl;
cout << "Test file : " << testfile << endl;
cout << "Version : " << version << endl;
SGD sgd_model;
Mf_info mf_info;
vector<Node> test_set = read_testset_pretrained_model(&mf_info, testfile);
read_trained_model(&mf_info, &sgd_model, infile);
remove_elements(&mf_info, test_set, version ,testfile);
cudaMalloc(&mf_info.d_test_COO, sizeof(Node) * mf_info.test_n);
cudaMalloc(&sgd_model.d_p, sizeof(float) * mf_info.params.k * mf_info.max_user);
cudaMalloc(&sgd_model.d_q, sizeof(float) * mf_info.params.k * mf_info.max_item);
cudaMemcpy(mf_info.d_test_COO, mf_info.test_COO, sizeof(Node) * mf_info.test_n, cudaMemcpyHostToDevice);
float* d_e_group;
unsigned int error_kernel_work_groups = ceil(mf_info.test_n/(float)512);
unsigned int group_error_size = error_kernel_work_groups;
unsigned int iter_num = ceil(mf_info.test_n / (float) (512 * error_kernel_work_groups));
unsigned int seg_size = 32;
cudaMalloc(&d_e_group, sizeof(float) * group_error_size);
double rmse = gpu_test_rmse(&mf_info, &sgd_model, mf_info.d_test_COO, d_e_group, error_kernel_work_groups, iter_num, seg_size, group_error_size);
cout << "RMSE : " << rmse << endl;
} |
14fcb6eece9750a4fd11c12534debff5dc07423a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "relu_kernel.h"
#define BLOCK_SIZE 32
__device__ __forceinline__ float relu(float a){
return a > 0.0f ? a:0.0f;
}
__global__ void relu_kernel (const float * __restrict__ src, float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = relu(src[row * n + col]);
}
}
void relu_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
 /* launch kernel */
hipLaunchKernelGGL(( relu_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n);
hipDeviceSynchronize();
}
| 14fcb6eece9750a4fd11c12534debff5dc07423a.cu | #include "relu_kernel.h"
#define BLOCK_SIZE 32
__device__ __forceinline__ float relu(float a){
return a > 0.0f ? a:0.0f;
}
__global__ void relu_kernel (const float * __restrict__ src, float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = relu(src[row * n + col]);
}
}
void relu_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
 /* launch kernel */
relu_kernel<<<grid, block>>>(src, dst, m, n);
cudaThreadSynchronize();
}
|
b43583199f26c61697bc514a1b3e3953b39633a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "kernels.h"
__global__
void saxpy_cudac(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
//if (i < n) addFourTimes(i, y);
}
void testSaxpy_cudac(void)
{
int N = 1<<21;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
 // Perform SAXPY on 2M (1<<21) elements
hipLaunchKernelGGL(( saxpy_cudac), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf(" Ran CUDA C kernel. Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| b43583199f26c61697bc514a1b3e3953b39633a2.cu | #include <stdio.h>
#include "kernels.h"
__global__
void saxpy_cudac(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
//if (i < n) addFourTimes(i, y);
}
void testSaxpy_cudac(void)
{
int N = 1<<21;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
 // Perform SAXPY on 2M (1<<21) elements
saxpy_cudac<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf(" Ran CUDA C kernel. Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
016c202dd1e50cf0505c5cd55abb8088a448a947.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "CUDAMiner_cuda.h"
#include "cuda_helper.h"
#include "../libethash/ethash.h"
#include "stdio.h"
#include "nvm_til.h"
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
// converted from 64->32 bit words
__device__ __constant__ const uint64_t keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
__device__ __forceinline__ void keccak_f1600_round(uint64_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint64_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
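// Keccak-f[1600] specialised for ethash's fixed 64-byte input: lanes 8..24 are
// cleared and lane 8 carries the pad10*1 padding for the 72-byte (Keccak-512) rate.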
__device__ __forceinline__ void keccak_f1600(uint64_t st[25])
{
for (int i = 8; i < 25; i++)
{
st[i] = 0;
}
st[8] = 0x8000000000000001;
for (int r = 0; r < 24; r++) {
keccak_f1600_round(st, r);
}
}
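// Ethash's FNV-style mix: (x * FNV_PRIME) ^ y. fnv4 applies the same mix
// lane-wise to uint4 vectors.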
#define FNV_PRIME 0x01000193U
#define fnv(x,y) ((uint32_t(x) * (FNV_PRIME)) ^uint32_t(y))
__device__ uint4 fnv4(uint4 a, uint4 b)
{
uint4 c;
c.x = a.x * FNV_PRIME ^ b.x;
c.y = a.y * FNV_PRIME ^ b.y;
c.z = a.z * FNV_PRIME ^ b.z;
c.w = a.w * FNV_PRIME ^ b.w;
return c;
}
#define NODE_WORDS (ETHASH_HASH_BYTES/sizeof(uint32_t))
__global__ void
ethash_calculate_dag_item(uint32_t start, hash64_t *g_dag, uint64_t dag_bytes, hash64_t* g_light, uint32_t light_words)
{
uint64_t const node_index = start + uint64_t(blockIdx.x) * blockDim.x + threadIdx.x;
uint64_t num_nodes = dag_bytes / sizeof(hash64_t);
uint64_t num_nodes_rounded = ((num_nodes + 3) / 4) * 4;
if (node_index >= num_nodes_rounded) return; // None of the threads from this quad have valid node_index
hash200_t dag_node;
for(int i=0; i<4; i++)
dag_node.uint4s[i] = g_light[node_index % light_words].uint4s[i];
dag_node.words[0] ^= node_index;
keccak_f1600(dag_node.uint64s);
const int thread_id = threadIdx.x & 3;
#pragma unroll
for (uint32_t i = 0; i < ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % light_words;
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4);
uint4 p4 = g_light[shuffle_index].uint4s[thread_id];
#pragma unroll
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4),
__shfl_sync(0xFFFFFFFF,p4.y, w, 4),
__shfl_sync(0xFFFFFFFF,p4.z, w, 4),
__shfl_sync(0xFFFFFFFF,p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
}
keccak_f1600(dag_node.uint64s);
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4));
}
if(shuffle_index*sizeof(hash64_t) < dag_bytes){
g_dag[shuffle_index].uint4s[thread_id] = s[thread_id];
}
}
L2WB;
MEM_FENCE;
}
void ethash_generate_dag(
hash64_t* dag,
uint64_t dag_bytes,
hash64_t * light,
uint32_t light_words,
uint32_t blocks,
uint32_t threads,
hipStream_t stream,
int device
)
{
uint64_t const work = dag_bytes / sizeof(hash64_t);
uint32_t fullRuns = (uint32_t)(work / (blocks * threads));
uint32_t const restWork = (uint32_t)(work % (blocks * threads));
if (restWork > 0) fullRuns++;
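// Generate the DAG in chunks of blocks * threads nodes; each launch fills the
// slice of nodes starting at i * blocks * threads.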
printf("fullRuns=%d\n",fullRuns);
for (uint32_t i = 0; i < fullRuns; i++)
{
hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(blocks), dim3(threads), 0, stream , i * blocks * threads, dag, dag_bytes, light, light_words);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
CUDA_SAFE_CALL(hipGetLastError());
}
| 016c202dd1e50cf0505c5cd55abb8088a448a947.cu | /*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "CUDAMiner_cuda.h"
#include "cuda_helper.h"
#include "../libethash/ethash.h"
#include "stdio.h"
#include "nvm_til.h"
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
// converted from 64->32 bit words
__device__ __constant__ const uint64_t keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
__device__ __forceinline__ void keccak_f1600_round(uint64_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint64_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
__device__ __forceinline__ void keccak_f1600(uint64_t st[25])
{
for (int i = 8; i < 25; i++)
{
st[i] = 0;
}
st[8] = 0x8000000000000001;
for (int r = 0; r < 24; r++) {
keccak_f1600_round(st, r);
}
}
#define FNV_PRIME 0x01000193U
#define fnv(x,y) ((uint32_t(x) * (FNV_PRIME)) ^uint32_t(y))
__device__ uint4 fnv4(uint4 a, uint4 b)
{
uint4 c;
c.x = a.x * FNV_PRIME ^ b.x;
c.y = a.y * FNV_PRIME ^ b.y;
c.z = a.z * FNV_PRIME ^ b.z;
c.w = a.w * FNV_PRIME ^ b.w;
return c;
}
#define NODE_WORDS (ETHASH_HASH_BYTES/sizeof(uint32_t))
__global__ void
ethash_calculate_dag_item(uint32_t start, hash64_t *g_dag, uint64_t dag_bytes, hash64_t* g_light, uint32_t light_words)
{
uint64_t const node_index = start + uint64_t(blockIdx.x) * blockDim.x + threadIdx.x;
uint64_t num_nodes = dag_bytes / sizeof(hash64_t);
uint64_t num_nodes_rounded = ((num_nodes + 3) / 4) * 4;
if (node_index >= num_nodes_rounded) return; // None of the threads from this quad have valid node_index
hash200_t dag_node;
for(int i=0; i<4; i++)
dag_node.uint4s[i] = g_light[node_index % light_words].uint4s[i];
dag_node.words[0] ^= node_index;
keccak_f1600(dag_node.uint64s);
const int thread_id = threadIdx.x & 3;
#pragma unroll
for (uint32_t i = 0; i < ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % light_words;
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4);
uint4 p4 = g_light[shuffle_index].uint4s[thread_id];
#pragma unroll
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4),
__shfl_sync(0xFFFFFFFF,p4.y, w, 4),
__shfl_sync(0xFFFFFFFF,p4.z, w, 4),
__shfl_sync(0xFFFFFFFF,p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
}
keccak_f1600(dag_node.uint64s);
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4));
}
if(shuffle_index*sizeof(hash64_t) < dag_bytes){
g_dag[shuffle_index].uint4s[thread_id] = s[thread_id];
}
}
L2WB;
MEM_FENCE;
}
void ethash_generate_dag(
hash64_t* dag,
uint64_t dag_bytes,
hash64_t * light,
uint32_t light_words,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream,
int device
)
{
uint64_t const work = dag_bytes / sizeof(hash64_t);
uint32_t fullRuns = (uint32_t)(work / (blocks * threads));
uint32_t const restWork = (uint32_t)(work % (blocks * threads));
if (restWork > 0) fullRuns++;
printf("fullRuns=%d\n",fullRuns);
for (uint32_t i = 0; i < fullRuns; i++)
{
ethash_calculate_dag_item <<<blocks, threads, 0, stream >>>(i * blocks * threads, dag, dag_bytes, light, light_words);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
CUDA_SAFE_CALL(cudaGetLastError());
}
|
2bc96a139ea40a842e0d66909dbe4a7e94ee9d6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaDomdecRecipLooper.h"
void CudaDomdecRecipLooper::run() {
while(true) {
// Receive header and stop if the STOP signal was received
if (!recipComm.recv_header()) break;
// Re-allocate coordinate array if needed
xyzq.realloc(recipComm.get_ncoord());
// Re-allocate force array if needed
reallocate<float3>(&force, &force_len, recipComm.get_ncoord(), 1.0f);
// Receive coordinates from Direct nodes
recipComm.recv_coord(xyzq.xyzq, stream);
//xyzq.save("xyzq_recip.txt");
assert(xyzq.xyzq == recipComm.get_coord_ptr());
// Compute forces
recip.calc(recipComm.get_inv_boxx(), recipComm.get_inv_boxy(), recipComm.get_inv_boxz(),
recipComm.get_coord_ptr(), recipComm.get_ncoord(),
recipComm.get_calc_energy(), recipComm.get_calc_virial(), force);
//NOTE: this synchronization is done in recipComm.send_force()
//cudaCheck(hipStreamSynchronize(stream));
//save_float3(recipComm.get_ncoord(), force, "force_recip.txt");
// Send forces to Direct nodes
recipComm.send_force(force, stream);
}
}
| 2bc96a139ea40a842e0d66909dbe4a7e94ee9d6b.cu | #include "CudaDomdecRecipLooper.h"
void CudaDomdecRecipLooper::run() {
while(true) {
// Receive header and stop if the STOP signal was received
if (!recipComm.recv_header()) break;
// Re-allocate coordinate array if needed
xyzq.realloc(recipComm.get_ncoord());
// Re-allocate force array if needed
reallocate<float3>(&force, &force_len, recipComm.get_ncoord(), 1.0f);
// Receive coordinates from Direct nodes
recipComm.recv_coord(xyzq.xyzq, stream);
//xyzq.save("xyzq_recip.txt");
assert(xyzq.xyzq == recipComm.get_coord_ptr());
// Compute forces
recip.calc(recipComm.get_inv_boxx(), recipComm.get_inv_boxy(), recipComm.get_inv_boxz(),
recipComm.get_coord_ptr(), recipComm.get_ncoord(),
recipComm.get_calc_energy(), recipComm.get_calc_virial(), force);
//NOTE: this synchronization is done in recipComm.send_force()
//cudaCheck(cudaStreamSynchronize(stream));
//save_float3(recipComm.get_ncoord(), force, "force_recip.txt");
// Send forces to Direct nodes
recipComm.send_force(force, stream);
}
}
|
13e1b27dfc8b1baccdc160ecc7afbac66fc0a172.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <common/fast_int_div.cuh>
#include "test_utils.h"
namespace MLCommon {
TEST(FastIntDiv, CpuTest) {
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
for (int i = 0; i < 10000; ++i) {
auto num = rand();
auto correct = num / divisor;
auto computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = -num;
correct = num / divisor;
computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
}
}
}
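// Each thread checks that % with FastIntDiv matches the plain % operator for
// one input value and for its negation.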
__global__ void fastIntDivTestKernel(int* computed, int* correct, const int* in,
FastIntDiv fid, int divisor, int len) {
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
computed[tid] = in[tid] % fid;
correct[tid] = in[tid] % divisor;
computed[len + tid] = -in[tid] % fid;
correct[len + tid] = -in[tid] % divisor;
}
}
TEST(FastIntDiv, GpuTest) {
static const int len = 100000;
static const int TPB = 128;
int *computed, *correct, *in;
allocate(computed, len * 2);
allocate(correct, len * 2);
allocate(in, len);
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
int* h_in = new int[len];
for (int i = 0; i < len; ++i) {
h_in[i] = rand();
}
updateDevice(in, h_in, len, 0);
int nblks = ceildiv(len, TPB);
hipLaunchKernelGGL(( fastIntDivTestKernel), dim3(nblks), dim3(TPB), 0, 0, computed, correct, in, fid,
divisor, len);
CUDA_CHECK(hipStreamSynchronize(0));
ASSERT_TRUE(devArrMatch(correct, computed, len * 2, Compare<int>()))
<< " divisor=" << divisor;
}
}
FastIntDiv dummyFunc(int num) {
FastIntDiv fd(num);
return fd;
}
TEST(FastIntDiv, IncorrectUsage) {
ASSERT_THROW(dummyFunc(-1), raft::exception);
ASSERT_THROW(dummyFunc(0), raft::exception);
}
} // namespace MLCommon
| 13e1b27dfc8b1baccdc160ecc7afbac66fc0a172.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <common/fast_int_div.cuh>
#include "test_utils.h"
namespace MLCommon {
TEST(FastIntDiv, CpuTest) {
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
for (int i = 0; i < 10000; ++i) {
auto num = rand();
auto correct = num / divisor;
auto computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = -num;
correct = num / divisor;
computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
}
}
}
__global__ void fastIntDivTestKernel(int* computed, int* correct, const int* in,
FastIntDiv fid, int divisor, int len) {
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
computed[tid] = in[tid] % fid;
correct[tid] = in[tid] % divisor;
computed[len + tid] = -in[tid] % fid;
correct[len + tid] = -in[tid] % divisor;
}
}
TEST(FastIntDiv, GpuTest) {
static const int len = 100000;
static const int TPB = 128;
int *computed, *correct, *in;
allocate(computed, len * 2);
allocate(correct, len * 2);
allocate(in, len);
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
int* h_in = new int[len];
for (int i = 0; i < len; ++i) {
h_in[i] = rand();
}
updateDevice(in, h_in, len, 0);
int nblks = ceildiv(len, TPB);
fastIntDivTestKernel<<<nblks, TPB, 0, 0>>>(computed, correct, in, fid,
divisor, len);
CUDA_CHECK(cudaStreamSynchronize(0));
ASSERT_TRUE(devArrMatch(correct, computed, len * 2, Compare<int>()))
<< " divisor=" << divisor;
}
}
FastIntDiv dummyFunc(int num) {
FastIntDiv fd(num);
return fd;
}
TEST(FastIntDiv, IncorrectUsage) {
ASSERT_THROW(dummyFunc(-1), raft::exception);
ASSERT_THROW(dummyFunc(0), raft::exception);
}
} // namespace MLCommon
|
08c6ce40e2076f2c23eb9b439a07745234a0ebea.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 32 // dim of matrix
//Flattened matrix multiplication. Kernel does not support x,y addressing
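// One thread per output element: each thread forms the dot product of one row
// of d_mat1 with one column of d_mat2 using flat row*width+col indexing.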
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
int k,sum=0;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(row<width && col<width)
{
for(k=0;k<width;k++)
{
sum += d_mat1[row*width+k] * d_mat2[k*width+col];
}
d_mat3[row*width+col] = sum;
}
}
int main()
{
int i,j;
int SIZE = N*N;
int BYTES = SIZE*sizeof(int);
// declare device and host variables
int h_mat1[N][N] , h_mat2[N][N] , h_mat3[N][N];
int *d_mat1, *d_mat2, *d_mat3;
// allocate memory on the device
hipMalloc((void**)&d_mat1,BYTES);
hipMalloc((void**)&d_mat2,BYTES);
hipMalloc((void**)&d_mat3,BYTES);
// generate matrix on host
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
h_mat1[i][j] = 1;
h_mat2[i][j] = 1;
h_mat3[i][j] = 0;
}
}
dim3 dimGrid(1,1);
dim3 dimBlock(N,N);
// move variables from host to device
hipMemcpy(d_mat1,h_mat1,BYTES,hipMemcpyHostToDevice);
hipMemcpy(d_mat2,h_mat2,BYTES,hipMemcpyHostToDevice);
 // launch kernel
hipLaunchKernelGGL(( mat_multiply), dim3(dimGrid),dim3(dimBlock), 0, 0, d_mat1,d_mat2,d_mat3,N);
hipDeviceSynchronize();
// move result back to main memory
hipMemcpy(h_mat3,d_mat3,BYTES,hipMemcpyDeviceToHost);
//print result
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
printf("%d ",h_mat3[i][j]);
}
printf("\n");
}
} | 08c6ce40e2076f2c23eb9b439a07745234a0ebea.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N 32 // dim of matrix
//Flattened matrix multiplication. Kernel does not support x,y addressing
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
int k,sum=0;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(row<width && col<width)
{
for(k=0;k<width;k++)
{
sum += d_mat1[row*width+k] * d_mat2[k*width+col];
}
d_mat3[row*width+col] = sum;
}
}
int main()
{
int i,j;
int SIZE = N*N;
int BYTES = SIZE*sizeof(int);
// declare device and host variables
int h_mat1[N][N] , h_mat2[N][N] , h_mat3[N][N];
int *d_mat1, *d_mat2, *d_mat3;
// allocate memory on the device
cudaMalloc((void**)&d_mat1,BYTES);
cudaMalloc((void**)&d_mat2,BYTES);
cudaMalloc((void**)&d_mat3,BYTES);
// generate matrix on host
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
h_mat1[i][j] = 1;
h_mat2[i][j] = 1;
h_mat3[i][j] = 0;
}
}
dim3 dimGrid(1,1);
dim3 dimBlock(N,N);
// move variables from host to device
cudaMemcpy(d_mat1,h_mat1,BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_mat2,h_mat2,BYTES,cudaMemcpyHostToDevice);
 // launch kernel
mat_multiply<<<dimGrid,dimBlock>>>(d_mat1,d_mat2,d_mat3,N);
cudaDeviceSynchronize();
// move result back to main memory
cudaMemcpy(h_mat3,d_mat3,BYTES,cudaMemcpyDeviceToHost);
//print result
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
printf("%d ",h_mat3[i][j]);
}
printf("\n");
}
} |
7c5ee1b80ca0721edb340b920674e1644471d5a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// Global variables
int *device_iData;
int *device_oData;
#define blockSize 512
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
/**
         * Kernel to perform a Naive scan on an integer array
*/
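        // Hillis-Steele style: pass i adds the element 2^(i-1) positions to the left,
        // producing an inclusive scan after ceil(log2 n) passes; kernMakeExclusive then
        // shifts the result right by one to make it exclusive.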
__global__ void kernScan(int n, int power, int* outputData, int* inputData)
{
const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Fetch it once
const auto curr_data = inputData[index];
if(index >= power)
{
outputData[index] = inputData[index - power] + curr_data;
}
else
{
outputData[index] = curr_data;
}
}
/**
* Shifts the whole array to the right by one in parallel
*/
__global__ void kernMakeExclusive(int n, int* outputData, int* inputData)
{
const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
outputData[index] = index != 0 ? inputData[index - 1] : 0;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata)
{
// 1. Allocate the memory in device
hipMalloc(reinterpret_cast<void**>(&device_iData), n * (sizeof(int)));
hipMalloc(reinterpret_cast<void**>(&device_oData), n * (sizeof(int)));
hipMemcpy(device_iData, idata, sizeof(int) * n, hipMemcpyHostToDevice);
hipDeviceSynchronize();
timer().startGpuTimer();
// 2. Compute Block count
dim3 num_blocks((n + blockSize - 1) / blockSize);
// 3. Call the kernel
const auto logn = ilog2ceil(n);
for (auto i = 1; i <= logn; ++i)
{
const auto power = 1 << (i - 1);
kernScan << < num_blocks, blockSize >> > (n, power, device_oData, device_iData);
// Swap
const auto temp = device_iData;
device_iData = device_oData;
device_oData = temp;
}
// Make it exclusive as we need that for stream compaction later on
kernMakeExclusive <<< num_blocks, blockSize >> > (n, device_oData, device_iData);
hipDeviceSynchronize();
timer().endGpuTimer();
hipMemcpy(odata, device_oData, sizeof(int) * n, hipMemcpyDeviceToHost);
// 4. Free up any gpu memory
hipFree(device_iData);
hipFree(device_oData);
}
}
}
| 7c5ee1b80ca0721edb340b920674e1644471d5a2.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// Global variables
int *device_iData;
int *device_oData;
#define blockSize 512
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
/**
         * Kernel to perform a Naive scan on an integer array
*/
__global__ void kernScan(int n, int power, int* outputData, int* inputData)
{
const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Fetch it once
const auto curr_data = inputData[index];
if(index >= power)
{
outputData[index] = inputData[index - power] + curr_data;
}
else
{
outputData[index] = curr_data;
}
}
/**
* Shifts the whole array to the right by one in parallel
*/
__global__ void kernMakeExclusive(int n, int* outputData, int* inputData)
{
const int index = (blockIdx.x * blockDim.x) + threadIdx.x;
outputData[index] = index != 0 ? inputData[index - 1] : 0;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata)
{
// 1. Allocate the memory in device
cudaMalloc(reinterpret_cast<void**>(&device_iData), n * (sizeof(int)));
cudaMalloc(reinterpret_cast<void**>(&device_oData), n * (sizeof(int)));
cudaMemcpy(device_iData, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
timer().startGpuTimer();
// 2. Compute Block count
dim3 num_blocks((n + blockSize - 1) / blockSize);
// 3. Call the kernel
const auto logn = ilog2ceil(n);
for (auto i = 1; i <= logn; ++i)
{
const auto power = 1 << (i - 1);
kernScan << < num_blocks, blockSize >> > (n, power, device_oData, device_iData);
// Swap
const auto temp = device_iData;
device_iData = device_oData;
device_oData = temp;
}
// Make it exclusive as we need that for stream compaction later on
kernMakeExclusive <<< num_blocks, blockSize >> > (n, device_oData, device_iData);
cudaDeviceSynchronize();
timer().endGpuTimer();
cudaMemcpy(odata, device_oData, sizeof(int) * n, cudaMemcpyDeviceToHost);
// 4. Free up any gpu memory
cudaFree(device_iData);
cudaFree(device_oData);
}
}
}
|
1407d28d3eb5c12212f40071f1ee2a229c29087c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define K_PHIMAG_BLOCK_SIZE 512
#define K_Q_BLOCK_SIZE 256
#define K_Q_K_ELEMS_PER_GRID 1024
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
__constant__ __device__ kValues kVal[K_Q_K_ELEMS_PER_GRID];
__global__ void ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK){
int indexK = blockIdx.x * K_PHIMAG_BLOCK_SIZE + threadIdx.x;
if(indexK < numK){
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
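// One thread per voxel: accumulates the real/imaginary parts of
// PhiMag(k) * exp(i * 2*pi * k . x) over the k-space samples currently cached
// in constant memory (up to K_Q_K_ELEMS_PER_GRID per launch).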
__global__ void computeQ_GPU(int numK, int kGlobalIndex, float* x, float* y, float* z,
float* Qr, float* Qi){
  float s_x, s_y, s_z, s_Qr, s_Qi;  // per-thread values; __shared__ scalars here would race across the block
int xIndex = blockIdx.x * K_Q_BLOCK_SIZE + threadIdx.x;
s_x = x[xIndex];
s_y = y[xIndex];
s_z = z[xIndex];
s_Qr = Qr[xIndex];
s_Qi = Qi[xIndex];
int indexK = 0;
  // if numK is odd, handle the first k-space sample here so the main loop can process two per iteration
if(numK % 2){
float expArg = PIx2 * (kVal[0].Kx * s_x +
kVal[0].Ky * s_y +
kVal[0].Kz * s_z);
s_Qr += kVal[0].PhiMag * cosf(expArg);
s_Qi += kVal[0].PhiMag * sinf(expArg);
indexK++;
kGlobalIndex++;
}
  // the remaining count is even: process two k-space samples per iteration
for(; indexK < K_Q_K_ELEMS_PER_GRID && kGlobalIndex < numK; indexK+=2, kGlobalIndex+=2){
float expArg = PIx2 * (kVal[indexK].Kx * s_x +
kVal[indexK].Ky * s_y +
kVal[indexK].Kz * s_z);
s_Qr += kVal[indexK].PhiMag * cosf(expArg);
s_Qi += kVal[indexK].PhiMag * sinf(expArg);
int indexk_1 = indexK + 1;
float expArg_1 = PIx2 * (kVal[indexk_1].Kx * s_x +
kVal[indexk_1].Ky * s_y +
kVal[indexk_1].Kz * s_z);
s_Qr += kVal[indexk_1].PhiMag * cosf(expArg_1);
s_Qi += kVal[indexk_1].PhiMag * sinf(expArg_1);
}
Qr[xIndex] = s_Qr;
Qi[xIndex] = s_Qi;
}
void ComputePhiMagGPU(int numK, float* d_phiR, float* d_phiI, float* d_phiMag){
int phiMag_block = (numK-1) / K_PHIMAG_BLOCK_SIZE + 1;
dim3 DimPhiMagBlock(K_PHIMAG_BLOCK_SIZE,1);
dim3 DimPhiMagGrid(phiMag_block,1);
hipLaunchKernelGGL(( ComputePhiMag_GPU), dim3(DimPhiMagGrid), dim3(DimPhiMagBlock), 0, 0, d_phiR, d_phiI, d_phiMag, numK);
}
void computeQGPU(int numK, int numX,float* d_x, float* d_y, float* d_z,
kValues* kVals,float* d_Qr, float* d_Qi){
int gridQ = (numK -1) / K_Q_K_ELEMS_PER_GRID + 1;
int blockQ = (numX - 1) / K_Q_BLOCK_SIZE + 1;
dim3 DimQBlock(K_Q_BLOCK_SIZE, 1);
dim3 DimQGrid(blockQ,1);
for(int i = 0; i < gridQ; i++){
int QGridBase = i * K_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int num = MIN(K_Q_K_ELEMS_PER_GRID, numK - QGridBase);
hipMemcpyToSymbol(kVal, kValsTile, num * sizeof(kValues), 0);
hipLaunchKernelGGL(( computeQ_GPU), dim3(DimQGrid), dim3(DimQBlock), 0, 0, numK,QGridBase,d_x,d_y,d_z,d_Qr,d_Qi);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi){
*phiMag = (float* ) malloc(numK * sizeof(float));
*Qr = (float*) malloc(numX * sizeof (float));
*Qi = (float*) malloc(numX * sizeof (float));
} | 1407d28d3eb5c12212f40071f1ee2a229c29087c.cu | #include <cstdlib>
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define K_PHIMAG_BLOCK_SIZE 512
#define K_Q_BLOCK_SIZE 256
#define K_Q_K_ELEMS_PER_GRID 1024
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
__constant__ __device__ kValues kVal[K_Q_K_ELEMS_PER_GRID];
__global__ void ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK){
int indexK = blockIdx.x * K_PHIMAG_BLOCK_SIZE + threadIdx.x;
if(indexK < numK){
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
__global__ void computeQ_GPU(int numK, int kGlobalIndex, float* x, float* y, float* z,
float* Qr, float* Qi){
  float s_x, s_y, s_z, s_Qr, s_Qi;  // per-thread values; __shared__ scalars here would race across the block
int xIndex = blockIdx.x * K_Q_BLOCK_SIZE + threadIdx.x;
s_x = x[xIndex];
s_y = y[xIndex];
s_z = z[xIndex];
s_Qr = Qr[xIndex];
s_Qi = Qi[xIndex];
int indexK = 0;
  // if numK is odd, handle the first k-space sample here so the main loop can process two per iteration
if(numK % 2){
float expArg = PIx2 * (kVal[0].Kx * s_x +
kVal[0].Ky * s_y +
kVal[0].Kz * s_z);
s_Qr += kVal[0].PhiMag * cosf(expArg);
s_Qi += kVal[0].PhiMag * sinf(expArg);
indexK++;
kGlobalIndex++;
}
  // the remaining count is even: process two k-space samples per iteration
for(; indexK < K_Q_K_ELEMS_PER_GRID && kGlobalIndex < numK; indexK+=2, kGlobalIndex+=2){
float expArg = PIx2 * (kVal[indexK].Kx * s_x +
kVal[indexK].Ky * s_y +
kVal[indexK].Kz * s_z);
s_Qr += kVal[indexK].PhiMag * cosf(expArg);
s_Qi += kVal[indexK].PhiMag * sinf(expArg);
int indexk_1 = indexK + 1;
float expArg_1 = PIx2 * (kVal[indexk_1].Kx * s_x +
kVal[indexk_1].Ky * s_y +
kVal[indexk_1].Kz * s_z);
s_Qr += kVal[indexk_1].PhiMag * cosf(expArg_1);
s_Qi += kVal[indexk_1].PhiMag * sinf(expArg_1);
}
Qr[xIndex] = s_Qr;
Qi[xIndex] = s_Qi;
}
void ComputePhiMagGPU(int numK, float* d_phiR, float* d_phiI, float* d_phiMag){
int phiMag_block = (numK-1) / K_PHIMAG_BLOCK_SIZE + 1;
dim3 DimPhiMagBlock(K_PHIMAG_BLOCK_SIZE,1);
dim3 DimPhiMagGrid(phiMag_block,1);
ComputePhiMag_GPU<<<DimPhiMagGrid, DimPhiMagBlock>>>(d_phiR, d_phiI, d_phiMag, numK);
}
void computeQGPU(int numK, int numX,float* d_x, float* d_y, float* d_z,
kValues* kVals,float* d_Qr, float* d_Qi){
int gridQ = (numK -1) / K_Q_K_ELEMS_PER_GRID + 1;
int blockQ = (numX - 1) / K_Q_BLOCK_SIZE + 1;
dim3 DimQBlock(K_Q_BLOCK_SIZE, 1);
dim3 DimQGrid(blockQ,1);
for(int i = 0; i < gridQ; i++){
int QGridBase = i * K_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int num = MIN(K_Q_K_ELEMS_PER_GRID, numK - QGridBase);
cudaMemcpyToSymbol(kVal, kValsTile, num * sizeof(kValues), 0);
computeQ_GPU<<<DimQGrid, DimQBlock>>>(numK,QGridBase,d_x,d_y,d_z,d_Qr,d_Qi);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi){
*phiMag = (float* ) malloc(numK * sizeof(float));
*Qr = (float*) malloc(numX * sizeof (float));
*Qi = (float*) malloc(numX * sizeof (float));
} |
4b796c3d0cac6f60676a0fa6fb1e992c3dcadb5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Particle.h"
#include <vector>
#include <ctime>
__host__ std::vector<Particle> createRainParticles(size_t size)
{
std::vector<Particle> particles;
for (size_t i = 0; i < size; ++i)
{
Vec3 direction(0, -1, 0);
Vec3 initPosition(
static_cast<float>(rand() % 200 - 100),
100.0f,
static_cast<float>(rand() % 200 - 100)
);
particles.push_back(Particle(static_cast<float>(rand() % 100), 100.0f, 5.0f, initPosition, direction, 5.0f));
}
return particles;
}
__host__ __device__ void rainParticleMove(Particle &particle, const Vec3 wind)
{
particle.Move();
particle.AddForce(wind, 50.0f);
}
__host__ void rainParticlesMoveCPU_execute(Particle *src, size_t size, const Vec3 wind)
{
for (size_t i = 0; i < size; ++i)
{
rainParticleMove(src[i], wind);
}
}
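// One thread per particle; the bounds check guards the padded tail of the last block.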
__global__ void rainParticlesMoveGPU_execute(Particle *src, size_t size, const Vec3 wind)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) rainParticleMove(src[index], wind);
}
__host__ void rainParticlesMoveCPU_prepare(size_t size, size_t iterSize)
{
std::vector<Particle> particles = createRainParticles(size);
std::cout << "Start moving" << std::endl;
PrintResults(particles, "particles_CPU.txt");
for (size_t i = 0; i < iterSize; ++i)
{
rainParticlesMoveCPU_execute(&particles[0], particles.size(), Vec3(1.0f, 0.0f, 0.0f));
}
PrintResults(particles, "particles_CPU.txt", true);
}
__host__ void rainParticlesMoveGPU_prepare(size_t size, size_t iterSize)
{
std::vector<Particle> particlesCPU = createRainParticles(size);
Particle *particlesGPU = NULL;
PrintResults(particlesCPU, "particles_GPU.txt");
size_t byteSize = particlesCPU.size() * sizeof(Particle);
HANDLE_ERROR(hipMalloc(&particlesGPU, byteSize));
HANDLE_ERROR(hipMemcpy(particlesGPU, &particlesCPU[0], byteSize, hipMemcpyHostToDevice));
CUDAConfig cudaConfig(size);
unsigned int gridSize = cudaConfig.GetGridSize();
unsigned int blockSize = cudaConfig.GetBlockSize();
std::cout << "Grid Size: " << gridSize << std::endl;
std::cout << "Block Size: " << blockSize << std::endl;
std::cout << "Start moving" << std::endl;
PrintResults(particlesCPU, "particles_GPU.txt");
for (size_t i = 0; i < iterSize; ++i)
{
hipLaunchKernelGGL(( rainParticlesMoveGPU_execute), dim3(gridSize), dim3(blockSize), 0, 0, particlesGPU, particlesCPU.size(), Vec3(1.0f, 0.0f, 0.0f));
}
HANDLE_ERROR(hipMemcpy(&particlesCPU[0], particlesGPU, byteSize, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(particlesGPU));
PrintResults(particlesCPU, "particles_GPU.txt", true);
}
int main_project(int argc, char **argv)
{
size_t sizeParticles = 100000;
size_t sizeIterations = 1000000;
// CPU
{
std::cout << "Start CPU" << std::endl;
clock_t t = clock();
rainParticlesMoveCPU_prepare(sizeParticles, sizeIterations);
t = clock() - t;
std::cout << "Done: (" << t << "ms)" << std::endl;
}
std::cout << std::endl;
// GPU
{
hipEvent_t start, stop;
float elapsedTime = 0;
hipEventCreate(&start);
hipEventCreate(&stop);
std::cout << "Start GPU" << std::endl;
hipEventRecord(start);
rainParticlesMoveGPU_prepare(sizeParticles, sizeIterations);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
std::cout << "Done: (" << elapsedTime << "ms)" << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
}
return 0;
}
| 4b796c3d0cac6f60676a0fa6fb1e992c3dcadb5d.cu | #include "Particle.h"
#include <vector>
#include <ctime>
__host__ std::vector<Particle> createRainParticles(size_t size)
{
std::vector<Particle> particles;
for (size_t i = 0; i < size; ++i)
{
Vec3 direction(0, -1, 0);
Vec3 initPosition(
static_cast<float>(rand() % 200 - 100),
100.0f,
static_cast<float>(rand() % 200 - 100)
);
particles.push_back(Particle(static_cast<float>(rand() % 100), 100.0f, 5.0f, initPosition, direction, 5.0f));
}
return particles;
}
__host__ __device__ void rainParticleMove(Particle &particle, const Vec3 wind)
{
particle.Move();
particle.AddForce(wind, 50.0f);
}
__host__ void rainParticlesMoveCPU_execute(Particle *src, size_t size, const Vec3 wind)
{
for (size_t i = 0; i < size; ++i)
{
rainParticleMove(src[i], wind);
}
}
__global__ void rainParticlesMoveGPU_execute(Particle *src, size_t size, const Vec3 wind)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) rainParticleMove(src[index], wind);
}
__host__ void rainParticlesMoveCPU_prepare(size_t size, size_t iterSize)
{
std::vector<Particle> particles = createRainParticles(size);
std::cout << "Start moving" << std::endl;
PrintResults(particles, "particles_CPU.txt");
for (size_t i = 0; i < iterSize; ++i)
{
rainParticlesMoveCPU_execute(&particles[0], particles.size(), Vec3(1.0f, 0.0f, 0.0f));
}
PrintResults(particles, "particles_CPU.txt", true);
}
__host__ void rainParticlesMoveGPU_prepare(size_t size, size_t iterSize)
{
std::vector<Particle> particlesCPU = createRainParticles(size);
Particle *particlesGPU = NULL;
PrintResults(particlesCPU, "particles_GPU.txt");
size_t byteSize = particlesCPU.size() * sizeof(Particle);
HANDLE_ERROR(cudaMalloc(&particlesGPU, byteSize));
HANDLE_ERROR(cudaMemcpy(particlesGPU, &particlesCPU[0], byteSize, cudaMemcpyHostToDevice));
CUDAConfig cudaConfig(size);
unsigned int gridSize = cudaConfig.GetGridSize();
unsigned int blockSize = cudaConfig.GetBlockSize();
std::cout << "Grid Size: " << gridSize << std::endl;
std::cout << "Block Size: " << blockSize << std::endl;
std::cout << "Start moving" << std::endl;
PrintResults(particlesCPU, "particles_GPU.txt");
for (size_t i = 0; i < iterSize; ++i)
{
rainParticlesMoveGPU_execute<<<gridSize, blockSize>>>(particlesGPU, particlesCPU.size(), Vec3(1.0f, 0.0f, 0.0f));
}
HANDLE_ERROR(cudaMemcpy(&particlesCPU[0], particlesGPU, byteSize, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(particlesGPU));
PrintResults(particlesCPU, "particles_GPU.txt", true);
}
int main_project(int argc, char **argv)
{
size_t sizeParticles = 100000;
size_t sizeIterations = 1000000;
// CPU
{
std::cout << "Start CPU" << std::endl;
clock_t t = clock();
rainParticlesMoveCPU_prepare(sizeParticles, sizeIterations);
t = clock() - t;
std::cout << "Done: (" << t << "ms)" << std::endl;
}
std::cout << std::endl;
// GPU
{
cudaEvent_t start, stop;
float elapsedTime = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::cout << "Start GPU" << std::endl;
cudaEventRecord(start);
rainParticlesMoveGPU_prepare(sizeParticles, sizeIterations);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
std::cout << "Done: (" << elapsedTime << "ms)" << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
return 0;
}
|
40439d3fea8135fb3eca364b2d18fe84fadbd8f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void SetVauleInIdxMinMax( float* vector, int id_min, int id_max, float value)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id >= id_min && id <= id_max)
vector[id] = value;
} | 40439d3fea8135fb3eca364b2d18fe84fadbd8f9.cu | #include "includes.h"
__global__ void SetVauleInIdxMinMax( float* vector, int id_min, int id_max, float value)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id >= id_min && id <= id_max)
vector[id] = value;
} |
9c5d497618f2fb81e56c9c5d61ca5db7415fc9e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// updateOutput, updateGradInput Kernels ported from Sergey Zagoruyko's pyinn, which itself was a
// port from Caffe
#include <THHUNN/THHUNN.h>
#include <THH/THHTensor.hpp>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHReduceApplyUtils.cuh>
#include <THH/THHSortUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THHUNN/SharedMem.cuh>
#include <THHUNN/common.h>
#include <algorithm>
#include <c10/macros/Macros.h>
// Crude benchmarks suggest 256 is better than 512 and 1024
// TODO: Autotune/use better heuristics, improve speed more.
const int MAX_BLOCK_SIZE = 256;
static int getGradParamsNumThreads(int batchSize){
//warp per item in a batch, up to a maximum
return ::min(batchSize * C10_WARP_SIZE, MAX_BLOCK_SIZE);
}
template <typename T, typename AccT, typename IndexType, int kSize>
__global__ void spatialDepthwiseConvolutionUpdateOutput(
const THCDeviceTensor<T, 4> input,
THCDeviceTensor<T, 4> output,
const THCDeviceTensor<T, 4> weight,
const THCDeviceTensor<T, 1> bias,
bool biasEnabled,
IndexType totalElements,
const int outputChannels,
const int depthwiseMultiplier,
const int inputWidth, const int inputHeight,
const int outputWidth, const int outputHeight,
const int kernelWidth, const int kernelHeight,
const int strideWidth, const int strideHeight,
const int padWidth, const int padHeight,
const int dilationWidth, const int dilationHeight)
{
const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth;
const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight;
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
//calculate n,c,h,w indices, replacing modulos by divide and multiply add,
//result is same as would be in the code below
//const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth
//const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth
//const int h = (linearIndex / outputWidth) % outputHeight;
//const int w = linearIndex % outputWidth;
int indtmp1 = linearIndex/outputWidth;
const int w = linearIndex - indtmp1 * outputWidth;
int indtmp2 = indtmp1/outputHeight;
const int h = indtmp1 - indtmp2 * outputHeight;
indtmp1 = indtmp2;
indtmp2 = indtmp1/outputChannels;
const int c = indtmp1 - indtmp2 * outputChannels;
const int n = indtmp2;
int inputChannel = c;
int inputChannels = outputChannels;
if (depthwiseMultiplier !=1) {
inputChannel /= depthwiseMultiplier;
inputChannels /= depthwiseMultiplier;
}
int weightOffset = c * kernelHeight * kernelWidth;
AccT value = biasEnabled ? ScalarConvert<T, AccT>::to(bias.data()[c]) : ScalarConvert<int, AccT>::to(0);
const IndexType offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth;
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kH = 0; kH < KH_LIMIT; ++kH) {
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kW = 0; kW < KW_LIMIT; ++kW) {
const int h_in = -padHeight + h * strideHeight + kH * dilationHeight;
const int w_in = -padWidth + w * strideWidth + kW * dilationWidth;
if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) {
const IndexType offset = offset0 + h_in * inputWidth + w_in;
value = THCNumerics<AccT>::add(
value,
THCNumerics<AccT>::mul(
ScalarConvert<T, AccT>::to(weight.data()[weightOffset]),
ScalarConvert<T, AccT>::to(input.data()[offset])));
}
++weightOffset;
}
}
output.data()[linearIndex] = ScalarConvert<AccT, T>::to(value);
}
}
template <typename T, typename AccT, typename IndexType, int kSize, int stride>
__global__ void spatialDepthwiseConvolutionUpdateGradInput(
const THCDeviceTensor<T, 4> gradOutput,
THCDeviceTensor<T, 4> gradInput,
const THCDeviceTensor<T, 4> weight,
IndexType totalElements,
const int inputChannels,
const int depthwiseMultiplier,
const int outputChannels,
const int inputWidth, const int inputHeight,
const int outputWidth, const int outputHeight,
const int kernelWidth, const int kernelHeight,
const int strideWidth, const int strideHeight,
const int padWidth, const int padHeight,
const int dilationWidth, const int dilationHeight)
{
const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth;
const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight;
const int strideW = (stride !=0) ? stride : strideWidth;
const int strideH = (stride !=0) ? stride : strideHeight;
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
int indtmp1 = linearIndex/inputWidth;
const int w = linearIndex - indtmp1 * inputWidth;
int indtmp2 = indtmp1/inputHeight;
const int h = indtmp1 - indtmp2 * inputHeight;
indtmp1 = indtmp2;
indtmp2 = indtmp1/inputChannels;
const int c = indtmp1 - indtmp2 * inputChannels;
const int n = indtmp2;
AccT value = ScalarConvert<int, AccT>::to(0);
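// Gather the gradient for this input pixel: every output position whose kernel
// window covered (h, w) in the forward pass, for every depthwise multiplier of
// this channel, contributes weight * gradOutput. The stride-divisibility test
// below keeps only output positions that actually map back onto this pixel.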
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) {
int och = (c * depthwiseMultiplier) + multiplier;
int weightOffset = och * kernelHeight * kernelWidth;
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kh = 0; kh < KH_LIMIT; ++kh) {
#ifdef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kw = 0; kw < KW_LIMIT; ++kw) {
int h_out = h + padHeight - kh * dilationHeight;
int w_out = w + padWidth - kw * dilationWidth;
if ((h_out % strideH == 0) && (w_out % strideW == 0)) {
h_out = h_out / strideH;
w_out = w_out / strideW;
if ((h_out >= 0) && (h_out < outputHeight)
&& (w_out >= 0) && (w_out < outputWidth)) {
const int offset = ((n * outputChannels + och) * outputHeight + h_out)
* outputWidth + w_out;
value = THCNumerics<AccT>::add(
value,
THCNumerics<AccT>::mul(
ScalarConvert<T, AccT>::to(weight.data()[weightOffset]),
ScalarConvert<T, AccT>::to(gradOutput.data()[offset])));
}
}
++weightOffset;
}
}
}
gradInput.data()[linearIndex] = ScalarConvert<AccT, T>::to(value);
}
}
template <typename T, typename AccT, typename IndexType>
__global__ void spatialDepthwiseConvolutionAccGradParameters(
const THCDeviceTensor<T, 4> gradOutput,
const THCDeviceTensor<T, 4> input,
THCDeviceTensor<T, 4> gradWeight,
const int batchSize,
const int inputChannels,
const int kernelChannels,
const int depthwiseMultiplier,
const int inputWidth, const int inputHeight,
const int outputWidth, const int outputHeight,
const int kernelWidth, const int kernelHeight,
const int strideWidth, const int strideHeight,
const int padWidth, const int padHeight,
const int dilationWidth, const int dilationHeight)
{
const int channelStride = kernelWidth * kernelHeight;
// Have to use a statically typed Shared Memory pointer
SharedMem<AccT> smem;
// Each Block is responsible for accumulating over a permutation of
// (channels x kH x kW), use blockIdx to determine which one
int bidx = blockIdx.x;
int kW = bidx % kernelWidth;
int kH = (bidx / kernelWidth) % kernelHeight;
int ch = (bidx / channelStride);
// Need to calculate which input channel is associated with this filter
// channel
int inputCh = ch / depthwiseMultiplier;
AccT grad = ScalarConvert<float, AccT>::to(0.0);
const int laneId = threadIdx.x % C10_WARP_SIZE;
const int batch = threadIdx.x / C10_WARP_SIZE;
const int nwarps = blockDim.x / C10_WARP_SIZE;
const int imageElements = outputWidth * outputHeight;
// Use a warp per item. In the original kernel, a thread block was used to sum over NHW.
// Here, we use a warp to sum values over the HW dimension, and if batchSize is larger than
// the number of warps, a warp loops over the remaining batch items (e.g. with 8 warps,
// warp 0 handles images 0, 8, 16, ..., warp 1 handles images 1, 9, 17, ...). Later, in
// blockReduce, all the warps are reduced anyway, so the full reduction is still over NHW,
// as it should be. This removes one modulo operation inside the loop (the batch index no
// longer has to be computed through a modulo; we simply loop over it) and brings a nice
// speed-up.
for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){
// Warp-stride loop over elements in a batch item
for (IndexType idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) {
// Need to calculate the following: batch position, and offset into the gradOutput
// in height, and width. We can intuit the corresponding position in the input from
// the other parameters we have
int go_w_offset = idx % outputWidth;
int go_h_offset = (idx / outputWidth);
int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth;
int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight;
if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) {
int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset;
int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx;
grad = THCNumerics<AccT>::add(
grad,
THCNumerics<AccT>::mul(
ScalarConvert<T, AccT>::to(input.data()[inputOffset]),
ScalarConvert<T, AccT>::to(gradOutput.data()[outputOffset])));
}
}
}
__syncthreads();
// At this point each thread in the block has a local gradient, which we need to
// accumulate prior to writing the global value
AccT *buf = smem.getPointer();
AccT tval = reduceBlock<AccT, ReduceAdd<AccT>>(
buf, blockDim.x, grad, ReduceAdd<AccT>(), ScalarConvert<float, AccT>::to(0));
// After reduction, the first thread in the block has the gradient, so it's responsible
// for writing it to gradWeight
if (threadIdx.x == 0) {
int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch);
gradWeight.data()[weightOffset] = ScalarConvert<AccT, T>::to(tval);
}
}
#include <THHUNN/generic/SpatialDepthwiseConvolution.hip>
#include <THH/THHGenerateFloatTypes.h>
| 9c5d497618f2fb81e56c9c5d61ca5db7415fc9e9.cu | // updateOutput, updateGradInput Kernels ported from Sergey Zagoruyko's pyinn, which itself was a
// port from Caffe
#include <THCUNN/THCUNN.h>
#include <THC/THCTensor.hpp>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCReduceApplyUtils.cuh>
#include <THC/THCSortUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THCUNN/SharedMem.cuh>
#include <THCUNN/common.h>
#include <algorithm>
#include <c10/macros/Macros.h>
// Crude benchmarks suggest 256 is better than 512 and 1024
// TODO: Autotune/use better heuristics, improve speed more.
const int MAX_BLOCK_SIZE = 256;
static int getGradParamsNumThreads(int batchSize){
//warp per item in a batch, up to a maximum
return std::min(batchSize * C10_WARP_SIZE, MAX_BLOCK_SIZE);
}
template <typename T, typename AccT, typename IndexType, int kSize>
__global__ void spatialDepthwiseConvolutionUpdateOutput(
const THCDeviceTensor<T, 4> input,
THCDeviceTensor<T, 4> output,
const THCDeviceTensor<T, 4> weight,
const THCDeviceTensor<T, 1> bias,
bool biasEnabled,
IndexType totalElements,
const int outputChannels,
const int depthwiseMultiplier,
const int inputWidth, const int inputHeight,
const int outputWidth, const int outputHeight,
const int kernelWidth, const int kernelHeight,
const int strideWidth, const int strideHeight,
const int padWidth, const int padHeight,
const int dilationWidth, const int dilationHeight)
{
const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth;
const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight;
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
//calculate n,c,h,w indices, replacing modulos by divide and multiply add,
//result is same as would be in the code below
//const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth
//const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth
//const int h = (linearIndex / outputWidth) % outputHeight;
//const int w = linearIndex % outputWidth;
int indtmp1 = linearIndex/outputWidth;
const int w = linearIndex - indtmp1 * outputWidth;
int indtmp2 = indtmp1/outputHeight;
const int h = indtmp1 - indtmp2 * outputHeight;
indtmp1 = indtmp2;
indtmp2 = indtmp1/outputChannels;
const int c = indtmp1 - indtmp2 * outputChannels;
const int n = indtmp2;
int inputChannel = c;
int inputChannels = outputChannels;
if (depthwiseMultiplier !=1) {
inputChannel /= depthwiseMultiplier;
inputChannels /= depthwiseMultiplier;
}
int weightOffset = c * kernelHeight * kernelWidth;
AccT value = biasEnabled ? ScalarConvert<T, AccT>::to(bias.data()[c]) : ScalarConvert<int, AccT>::to(0);
const IndexType offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth;
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kH = 0; kH < KH_LIMIT; ++kH) {
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kW = 0; kW < KW_LIMIT; ++kW) {
const int h_in = -padHeight + h * strideHeight + kH * dilationHeight;
const int w_in = -padWidth + w * strideWidth + kW * dilationWidth;
if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) {
const IndexType offset = offset0 + h_in * inputWidth + w_in;
value = THCNumerics<AccT>::add(
value,
THCNumerics<AccT>::mul(
ScalarConvert<T, AccT>::to(weight.data()[weightOffset]),
ScalarConvert<T, AccT>::to(input.data()[offset])));
}
++weightOffset;
}
}
output.data()[linearIndex] = ScalarConvert<AccT, T>::to(value);
}
}
template <typename T, typename AccT, typename IndexType, int kSize, int stride>
__global__ void spatialDepthwiseConvolutionUpdateGradInput(
const THCDeviceTensor<T, 4> gradOutput,
THCDeviceTensor<T, 4> gradInput,
const THCDeviceTensor<T, 4> weight,
IndexType totalElements,
const int inputChannels,
const int depthwiseMultiplier,
const int outputChannels,
const int inputWidth, const int inputHeight,
const int outputWidth, const int outputHeight,
const int kernelWidth, const int kernelHeight,
const int strideWidth, const int strideHeight,
const int padWidth, const int padHeight,
const int dilationWidth, const int dilationHeight)
{
const int KW_LIMIT = (kSize !=0) ? kSize : kernelWidth;
const int KH_LIMIT = (kSize !=0) ? kSize : kernelHeight;
const int strideW = (stride !=0) ? stride : strideWidth;
const int strideH = (stride !=0) ? stride : strideHeight;
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x) {
int indtmp1 = linearIndex/inputWidth;
const int w = linearIndex - indtmp1 * inputWidth;
int indtmp2 = indtmp1/inputHeight;
const int h = indtmp1 - indtmp2 * inputHeight;
indtmp1 = indtmp2;
indtmp2 = indtmp1/inputChannels;
const int c = indtmp1 - indtmp2 * inputChannels;
const int n = indtmp2;
AccT value = ScalarConvert<int, AccT>::to(0);
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) {
int och = (c * depthwiseMultiplier) + multiplier;
int weightOffset = och * kernelHeight * kernelWidth;
#ifndef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kh = 0; kh < KH_LIMIT; ++kh) {
#ifdef __HIP_PLATFORM_HCC__
#pragma unroll
#endif
for (int kw = 0; kw < KW_LIMIT; ++kw) {
int h_out = h + padHeight - kh * dilationHeight;
int w_out = w + padWidth - kw * dilationWidth;
if ((h_out % strideH == 0) && (w_out % strideW == 0)) {
h_out = h_out / strideH;
w_out = w_out / strideW;
if ((h_out >= 0) && (h_out < outputHeight)
&& (w_out >= 0) && (w_out < outputWidth)) {
const int offset = ((n * outputChannels + och) * outputHeight + h_out)
* outputWidth + w_out;
value = THCNumerics<AccT>::add(
value,
THCNumerics<AccT>::mul(
ScalarConvert<T, AccT>::to(weight.data()[weightOffset]),
ScalarConvert<T, AccT>::to(gradOutput.data()[offset])));
}
}
++weightOffset;
}
}
}
gradInput.data()[linearIndex] = ScalarConvert<AccT, T>::to(value);
}
}
template <typename T, typename AccT, typename IndexType>
__global__ void spatialDepthwiseConvolutionAccGradParameters(
const THCDeviceTensor<T, 4> gradOutput,
const THCDeviceTensor<T, 4> input,
THCDeviceTensor<T, 4> gradWeight,
const int batchSize,
const int inputChannels,
const int kernelChannels,
const int depthwiseMultiplier,
const int inputWidth, const int inputHeight,
const int outputWidth, const int outputHeight,
const int kernelWidth, const int kernelHeight,
const int strideWidth, const int strideHeight,
const int padWidth, const int padHeight,
const int dilationWidth, const int dilationHeight)
{
const int channelStride = kernelWidth * kernelHeight;
// Have to use a statically typed Shared Memory pointer
SharedMem<AccT> smem;
// Each Block is responsible for accumulating over a permutation of
// (channels x kH x kW), use blockIdx to determine which one
int bidx = blockIdx.x;
int kW = bidx % kernelWidth;
int kH = (bidx / kernelWidth) % kernelHeight;
int ch = (bidx / channelStride);
// Need to calculate which input channel is associated with this filter
// channel
int inputCh = ch / depthwiseMultiplier;
AccT grad = ScalarConvert<float, AccT>::to(0.0);
const int laneId = threadIdx.x % C10_WARP_SIZE;
const int batch = threadIdx.x / C10_WARP_SIZE;
const int nwarps = blockDim.x / C10_WARP_SIZE;
const int imageElements = outputWidth * outputHeight;
// Use a warp per item. In the original kernel, a thread block was used to sum over NHW.
// Here, we use a warp to sum values over the HW dimension, and if batchSize is larger than
// the number of warps, a warp loops over the remaining batch items (e.g. with 8 warps,
// warp 0 handles images 0, 8, 16, ..., warp 1 handles images 1, 9, 17, ...). Later, in
// blockReduce, all the warps are reduced anyway, so the full reduction is still over NHW,
// as it should be. This removes one modulo operation inside the loop (the batch index no
// longer has to be computed through a modulo; we simply loop over it) and brings a nice
// speed-up.
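// For example, with blockDim.x = 256 and a warp size of 32 there are nwarps = 8
// warps per block: warp 0 processes batch items 0, 8, 16, ..., warp 1 processes
// 1, 9, 17, ..., and within each item the 32 lanes stride over the
// outputHeight * outputWidth elements in steps of the warp size.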
for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){
// Warp-stride loop over elements in a batch item
for (IndexType idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) {
// Need to calculate the following: batch position, and offset into the gradOutput
// in height, and width. We can intuit the corresponding position in the input from
// the other parameters we have
int go_w_offset = idx % outputWidth;
int go_h_offset = (idx / outputWidth);
int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth;
int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight;
if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) {
int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset;
int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx;
grad = THCNumerics<AccT>::add(
grad,
THCNumerics<AccT>::mul(
ScalarConvert<T, AccT>::to(input.data()[inputOffset]),
ScalarConvert<T, AccT>::to(gradOutput.data()[outputOffset])));
}
}
}
__syncthreads();
// At this point each thread in the block has a local gradient, which we need to
// accumulate prior to writing the global value
AccT *buf = smem.getPointer();
AccT tval = reduceBlock<AccT, ReduceAdd<AccT>>(
buf, blockDim.x, grad, ReduceAdd<AccT>(), ScalarConvert<float, AccT>::to(0));
// After reduction, the first thread in the block has the gradient, so it's responsible
// for writing it to gradWeight
if (threadIdx.x == 0) {
int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch);
gradWeight.data()[weightOffset] = ScalarConvert<AccT, T>::to(tval);
}
}
#include <THCUNN/generic/SpatialDepthwiseConvolution.cu>
#include <THC/THCGenerateFloatTypes.h>
|
4bc773f975466cb68d86051a7f437dcfdbcc1abc.hip | // !!! This is a file automatically generated by hipify!!!
#include "gsimcore.cuh"
//#include "boid.cuh"
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#endif
//#include "test.cuh"
//#include "socialForce.cuh"
#include "socialForceEnhanced.cuh"
int main(int argc, char *argv[]){
//argv[1]: config.txt
//argv[2]: numAgent
init<SocialForceRoomAgentData>(argv[1]);
SocialForceRoomModel *model_h = new SocialForceRoomModel(&argv[2]);
/*Main work started here*/
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
doLoop(model_h);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("execution time: %f\n", time);
}
| 4bc773f975466cb68d86051a7f437dcfdbcc1abc.cu | #include "gsimcore.cuh"
//#include "boid.cuh"
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#endif
//#include "test.cuh"
//#include "socialForce.cuh"
#include "socialForceEnhanced.cuh"
int main(int argc, char *argv[]){
//argv[1]: config.txt
//argv[2]: numAgent
init<SocialForceRoomAgentData>(argv[1]);
SocialForceRoomModel *model_h = new SocialForceRoomModel(&argv[2]);
/*Main work started here*/
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
doLoop(model_h);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
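// cudaEventElapsedTime reports the time between the two recorded events in
// milliseconds, so the value printed below is in ms.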
printf("execution time: %f\n", time);
}
|
3cce59e9bce6389f8f4e5833369f46a16dfda0f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* =======================================================
Student: Patricia Wilthew
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.cu -o SDH
=======================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000
/*
Structure: atom.
Descriptors for single atom in the tree.
*/
typedef struct atomdesc
{
double x_pos;
double y_pos;
double z_pos;
} atom;
/*
Structure: bucket.
Size of the buckets.
*/
typedef struct hist_entry
{
long long d_cnt;
} bucket;
hipError_t err;
long long PDH_acnt;
double PDH_res;
int num_buckets, PDH_threads;
bucket *histogram;
atom *atom_list;
struct timezone Idunno;
struct timeval startTime, endTime;
/*
Method: distance.
Distance of two points (x1, y1, z1) and (x2, y2, z2).
*/
__device__
double distance(double x1, double y1, double z1, double x2, double y2, double z2)
{
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
Method: PDH_on_gpu.
SDH solution in GPU threads.
*/
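/*
Strategy: each block builds a private copy of the histogram in dynamically
allocated shared memory (one unsigned int counter per bucket), accumulates
the pair distances it is responsible for using cheap shared-memory atomics,
and only merges its partial histogram into the global one at the end,
which keeps contention on global-memory atomics low.
*/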
__global__
void PDH_on_gpu(double *x, double *y, double *z, bucket *hist,
int PDH_acnt, double PDH_res, int num_buckets)
{
extern __shared__ unsigned int SHMOut[];
int t_id, b_id, t, s;
int i, h_pos;
double x1, y1, z1, x2, y2, z2, d;
t_id = threadIdx.x;
b_id = blockIdx.x;
t = b_id*blockDim.x + t_id;
// Initialize Shared Memory to Zero.
for (s = 0; s < (num_buckets + blockDim.x - 1)/blockDim.x; s++)
{
if (t_id + s*blockDim.x < num_buckets)
{
SHMOut[t_id + s*blockDim.x] = 0;
}
}
// Make sure every thread in the block has finished zeroing its share of the
// shared-memory histogram before any thread starts accumulating into it.
__syncthreads();
// The t-th datum of the b-th input data block. Guard against threads past the
// end of the atom list (the grid size is rounded up to whole blocks).
if (t < PDH_acnt)
{
x1 = x[t];
y1 = y[t];
z1 = z[t];
for (i=t+1; i < PDH_acnt; i++)
{
x2 = x[i];
y2 = y[i];
z2 = z[i];
d = distance(x1, y1, z1, x2, y2, z2);
h_pos = (int) (d / PDH_res);
atomicAdd(&SHMOut[h_pos], 1);
}
}
__syncthreads();
// Write results to Global Memory.
for (s = 0; s < (num_buckets + blockDim.x - 1)/blockDim.x; s++)
{
if (t_id + s*blockDim.x < num_buckets)
{
atomicAdd((unsigned int *)&hist[t_id + s*blockDim.x].d_cnt,
SHMOut[t_id + s*blockDim.x]);
}
}
}
/*
Method: p2p_distance.
Distance of two points in the atom_list.
*/
double p2p_distance(atom *atom_list, int ind1, int ind2)
{
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
Method: PDH_baseline.
Brute-force SDH solution in a single CPU thread.
*/
int PDH_baseline(atom *atom_list, bucket *histogram, long long PDH_acnt, double PDH_res)
{
int i, j, h_pos;
double dist;
for (i = 0; i < PDH_acnt; i++)
{
for (j = i+1; j < PDH_acnt; j++)
{
dist = p2p_distance(atom_list, i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
Method: report_running_time.
Set a checkpoint and show the (natural) running time in seconds.
*/
double report_running_time()
{
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if (usec_diff < 0)
{
sec_diff--;
usec_diff += 1000000;
}
printf("Running time: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
Method: output_histogram.
Print the counts in all buckets of the histogram.
*/
void output_histogram(bucket *histogram, int num_buckets)
{
int i;
long long total_cnt = 0;
for (i=0; i< num_buckets; i++)
{
if (i%5 == 0) // Print 5 buckets in a row.
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
// Also want to make sure the total distance count is correct.
if (i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/*
Method: catch_error.
Prints any CUDA error to stdout.
*/
void catch_error(hipError_t error)
{
if (error)
{
printf("Error: %s\n", hipGetErrorString(err));
}
}
int main(int argc, char **argv)
{
if (argc <= 3)
{
printf("Usage: ./SDH {# Atoms} {# Buckets} {# Threads}\n");
exit(1);
}
if (atoi(argv[3]) < 32)
{
printf("Number of threads must be greater or equal to 32.\n");
exit(1);
}
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
PDH_threads = atoi(argv[3]);
// Variables declaration;
float time = 0;
int i;
double *x, *y, *z, *d_x, *d_y, *d_z;
bucket *d_histogram, *h_histogram;
// bucket *difference_histogram;
// Variables initialization and mem allocation.
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
atom_list = (atom *)malloc(sizeof(atom) * PDH_acnt);
histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
x = (double *)malloc(sizeof(double)*PDH_acnt);
y = (double *)malloc(sizeof(double)*PDH_acnt);
z = (double *)malloc(sizeof(double)*PDH_acnt);
h_histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
// difference_histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
err = hipSuccess;
srand(1);
// Generate data following a uniform distribution.
for (i = 0; i < PDH_acnt; i++)
{
x[i] = atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
y[i] = atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
z[i] = atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/*
printf("----CPU----");
// Start counting time.
gettimeofday(&startTime, &Idunno);
// Call CPU single thread version to compute the histogram.
PDH_baseline(atom_list, histogram, PDH_acnt, PDH_res);
// Check the total running time.
report_running_time();
// Print out the histogram.
output_histogram(histogram, num_buckets);
*/
/* My part of the project */
// Initialize h_histogram with zeroes.
for (i = 0; i < num_buckets; i++)
{
h_histogram[i].d_cnt = 0;
}
// Allocate memory in device for single dim arrays.
err = hipMalloc((void **)&d_x, PDH_acnt * sizeof(double)); catch_error(err);
err = hipMalloc((void **)&d_y, PDH_acnt * sizeof(double)); catch_error(err);
err = hipMalloc((void **)&d_z, PDH_acnt * sizeof(double)); catch_error(err);
// Allocate memory in device for histogram.
err = hipMalloc(&d_histogram, num_buckets * sizeof(bucket)); catch_error(err);
// Copy single dim arrays to device.
err = hipMemcpy(d_x, x, PDH_acnt * sizeof(double), hipMemcpyHostToDevice); catch_error(err);
err = hipMemcpy(d_y, y, PDH_acnt * sizeof(double), hipMemcpyHostToDevice); catch_error(err);
err = hipMemcpy(d_z, z, PDH_acnt * sizeof(double), hipMemcpyHostToDevice); catch_error(err);
// Copy zeroed histogram from host to device.
err = hipMemcpy(d_histogram, h_histogram, num_buckets * sizeof(bucket),
hipMemcpyHostToDevice); catch_error(err);
// Recording variables.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Start to record.
hipEventRecord( start, 0);
// Call GPU version.
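// hipLaunchKernelGGL arguments, in order: kernel, grid dimensions, block
// dimensions, dynamic shared-memory bytes (one counter per histogram bucket),
// stream (0 = default stream), then the kernel arguments themselves.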
hipLaunchKernelGGL(( PDH_on_gpu), dim3((PDH_acnt - 1 + PDH_threads)/PDH_threads),
dim3(PDH_threads),
num_buckets * sizeof(int), 0, d_x, d_y, d_z,
d_histogram,
PDH_acnt,
PDH_res,
num_buckets);
// Stop recording.
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
// Copy histogram from device to host.
err = hipMemcpy(h_histogram, d_histogram, num_buckets * sizeof(bucket),
hipMemcpyDeviceToHost); catch_error(err);
// Print out the histogram.
output_histogram(h_histogram, num_buckets);
// Output the total running time.
printf("******** Total Running Time of Kernel = %.5f sec *******\n", time/1000.0);
/*
printf("\n----Difference between histograms:\n");
// Print the difference between the histograms.
for (i = 0; i < num_buckets; i++)
{
difference_histogram[i].d_cnt = abs(histogram[i].d_cnt - h_histogram[i].d_cnt);
}
// Print out the histograms' difference.
output_histogram(difference_histogram, num_buckets);
*/
// Free memory.
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
hipFree(d_histogram);
free(histogram);
free(h_histogram);
free(atom_list);
free(x);
free(y);
free(z);
return 0;
}
| 3cce59e9bce6389f8f4e5833369f46a16dfda0f2.cu | /* =======================================================
Student: Patricia Wilthew
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.cu -o SDH
=======================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000
/*
Structure: atom.
Descriptors for single atom in the tree.
*/
typedef struct atomdesc
{
double x_pos;
double y_pos;
double z_pos;
} atom;
/*
Structure: bucket.
Size of the buckets.
*/
typedef struct hist_entry
{
long long d_cnt;
} bucket;
cudaError_t err;
long long PDH_acnt;
double PDH_res;
int num_buckets, PDH_threads;
bucket *histogram;
atom *atom_list;
struct timezone Idunno;
struct timeval startTime, endTime;
/*
Method: distance.
Distance of two points (x1, y1, z1) and (x2, y2, z2).
*/
__device__
double distance(double x1, double y1, double z1, double x2, double y2, double z2)
{
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
Method: PDH_on_gpu.
SDH solution in GPU threads.
*/
__global__
void PDH_on_gpu(double *x, double *y, double *z, bucket *hist,
int PDH_acnt, double PDH_res, int num_buckets)
{
extern __shared__ unsigned int SHMOut[];
int t_id, b_id, t, s;
int i, h_pos;
double x1, y1, z1, x2, y2, z2, d;
t_id = threadIdx.x;
b_id = blockIdx.x;
t = b_id*blockDim.x + t_id;
// Initialize Shared Memory to Zero.
for (s = 0; s < (num_buckets + blockDim.x - 1)/blockDim.x; s++)
{
if (t_id + s*blockDim.x < num_buckets)
{
SHMOut[t_id + s*blockDim.x] = 0;
}
}
// Make sure every thread in the block has finished zeroing its share of the
// shared-memory histogram before any thread starts accumulating into it.
__syncthreads();
// The t-th datum of the b-th input data block. Guard against threads past the
// end of the atom list (the grid size is rounded up to whole blocks).
if (t < PDH_acnt)
{
x1 = x[t];
y1 = y[t];
z1 = z[t];
for (i=t+1; i < PDH_acnt; i++)
{
x2 = x[i];
y2 = y[i];
z2 = z[i];
d = distance(x1, y1, z1, x2, y2, z2);
h_pos = (int) (d / PDH_res);
atomicAdd(&SHMOut[h_pos], 1);
}
}
__syncthreads();
// Write results to Global Memory.
for (s = 0; s < (num_buckets + blockDim.x - 1)/blockDim.x; s++)
{
if (t_id + s*blockDim.x < num_buckets)
{
atomicAdd((unsigned int *)&hist[t_id + s*blockDim.x].d_cnt,
SHMOut[t_id + s*blockDim.x]);
}
}
}
/*
Method: p2p_distance.
Distance of two points in the atom_list.
*/
double p2p_distance(atom *atom_list, int ind1, int ind2)
{
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
double z1 = atom_list[ind1].z_pos;
double z2 = atom_list[ind2].z_pos;
return sqrt((x1 - x2)*(x1-x2) + (y1 - y2)*(y1 - y2) + (z1 - z2)*(z1 - z2));
}
/*
Method: PDH_baseline.
Brute-force SDH solution in a single CPU thread.
*/
int PDH_baseline(atom *atom_list, bucket *histogram, long long PDH_acnt, double PDH_res)
{
int i, j, h_pos;
double dist;
for (i = 0; i < PDH_acnt; i++)
{
for (j = i+1; j < PDH_acnt; j++)
{
dist = p2p_distance(atom_list, i,j);
h_pos = (int) (dist / PDH_res);
histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
Method: report_running_time.
Set a checkpoint and show the (natural) running time in seconds.
*/
double report_running_time()
{
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if (usec_diff < 0)
{
sec_diff--;
usec_diff += 1000000;
}
printf("Running time: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
Method: output_histogram.
Print the counts in all buckets of the histogram.
*/
void output_histogram(bucket *histogram, int num_buckets)
{
int i;
long long total_cnt = 0;
for (i=0; i< num_buckets; i++)
{
if (i%5 == 0) // Print 5 buckets in a row.
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
// Also want to make sure the total distance count is correct.
if (i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/*
Method: catch_error.
Prints any CUDA error to stdout.
*/
void catch_error(cudaError_t error)
{
if (error)
{
printf("Error: %s\n", cudaGetErrorString(err));
}
}
int main(int argc, char **argv)
{
if (argc <= 3)
{
printf("Usage: ./SDH {# Atoms} {# Buckets} {# Threads}\n");
exit(1);
}
if (atoi(argv[3]) < 32)
{
printf("Number of threads must be greater or equal to 32.\n");
exit(1);
}
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
PDH_threads = atoi(argv[3]);
// Variables declaration;
float time = 0;
int i;
double *x, *y, *z, *d_x, *d_y, *d_z;
bucket *d_histogram, *h_histogram;
// bucket *difference_histogram;
// Variables initialization and mem allocation.
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
atom_list = (atom *)malloc(sizeof(atom) * PDH_acnt);
histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
x = (double *)malloc(sizeof(double)*PDH_acnt);
y = (double *)malloc(sizeof(double)*PDH_acnt);
z = (double *)malloc(sizeof(double)*PDH_acnt);
h_histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
// difference_histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
err = cudaSuccess;
srand(1);
// Generate data following a uniform distribution.
for (i = 0; i < PDH_acnt; i++)
{
x[i] = atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
y[i] = atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
z[i] = atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/*
printf("----CPU----");
// Start counting time.
gettimeofday(&startTime, &Idunno);
// Call CPU single thread version to compute the histogram.
PDH_baseline(atom_list, histogram, PDH_acnt, PDH_res);
// Check the total running time.
report_running_time();
// Print out the histogram.
output_histogram(histogram, num_buckets);
*/
/* My part of the project */
// Initialize h_histogram with zeroes.
for (i = 0; i < num_buckets; i++)
{
h_histogram[i].d_cnt = 0;
}
// Allocate memory in device for single dim arrays.
err = cudaMalloc((void **)&d_x, PDH_acnt * sizeof(double)); catch_error(err);
err = cudaMalloc((void **)&d_y, PDH_acnt * sizeof(double)); catch_error(err);
err = cudaMalloc((void **)&d_z, PDH_acnt * sizeof(double)); catch_error(err);
// Allocate memory in device for histogram.
err = cudaMalloc(&d_histogram, num_buckets * sizeof(bucket)); catch_error(err);
// Copy single dim arrays to device.
err = cudaMemcpy(d_x, x, PDH_acnt * sizeof(double), cudaMemcpyHostToDevice); catch_error(err);
err = cudaMemcpy(d_y, y, PDH_acnt * sizeof(double), cudaMemcpyHostToDevice); catch_error(err);
err = cudaMemcpy(d_z, z, PDH_acnt * sizeof(double), cudaMemcpyHostToDevice); catch_error(err);
// Copy zeroed histogram from host to device.
err = cudaMemcpy(d_histogram, h_histogram, num_buckets * sizeof(bucket),
cudaMemcpyHostToDevice); catch_error(err);
// Recording variables.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start to record.
cudaEventRecord( start, 0);
// Call GPU version.
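// Note: the launch requests num_buckets * sizeof(int) bytes of dynamic shared
// memory per block; this assumes the whole histogram fits within the device's
// per-block shared-memory limit. A very small bucket width (large num_buckets)
// would exceed it and make the launch fail.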
PDH_on_gpu<<<(PDH_acnt - 1 + PDH_threads)/PDH_threads,
PDH_threads,
num_buckets * sizeof(int)>>>(d_x, d_y, d_z,
d_histogram,
PDH_acnt,
PDH_res,
num_buckets);
// Stop recording.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy histogram from device to host.
err = cudaMemcpy(h_histogram, d_histogram, num_buckets * sizeof(bucket),
cudaMemcpyDeviceToHost); catch_error(err);
// Print out the histogram.
output_histogram(h_histogram, num_buckets);
// Output the total running time.
printf("******** Total Running Time of Kernel = %.5f sec *******\n", time/1000.0);
/*
printf("\n----Difference between histograms:\n");
// Print the difference between the histograms.
for (i = 0; i < num_buckets; i++)
{
difference_histogram[i].d_cnt = abs(histogram[i].d_cnt - h_histogram[i].d_cnt);
}
// Print out the histograms' difference.
output_histogram(difference_histogram, num_buckets);
*/
// Free memory.
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
cudaFree(d_histogram);
free(histogram);
free(h_histogram);
free(atom_list);
free(x);
free(y);
free(z);
return 0;
}
|
c44fbf2d3843906072f67fb8db9a1d9ba1e035e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// TODO : explain what the params variable and setup_params_on_device() do
__device__
DiffusionParams params;
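// `params` is a single global in device memory holding the grid geometry and
// the device pointers to the boundary fields and the old solution x_old.
// setup_params_on_device() fills a host-side copy once and uploads it with
// hipMemcpyToSymbol, so the kernels below can read these values directly
// instead of receiving them as a long list of launch arguments.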
void setup_params_on_device(int nx, int ny, double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_check_status(
hipMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_shared(double* S, const double *U) {
double extern __shared__ buffer[];
auto nx = params.nx;
auto ny = params.ny;
auto bx = blockDim.x+2;
auto by = blockDim.y+2;
auto gi = threadIdx.x + blockDim.x*blockIdx.x;
auto gj = threadIdx.y + blockDim.y*blockIdx.y;
auto li = threadIdx.x + 1;
auto lj = threadIdx.y + 1;
auto gpos = gi + gj * nx;
auto lpos = li + lj * bx;
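// The shared buffer is a (blockDim.x+2) x (blockDim.y+2) tile: the block's own
// grid points plus a one-cell halo. Threads on the edge of the block fill the
// halo either from the neighbouring grid points or, on the domain boundary,
// from the bndW/bndE/bndS/bndN arrays.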
if(gi<nx && gj<ny) {
// load the shared memory
if(li==1) { // west boundary
if(gi==0)
buffer[lpos-1] = params.bndW[gj];
else
buffer[lpos-1] = U[gpos-1];
}
if(li==bx-2) { // east boundary
if(gi==nx-1)
buffer[lpos+1] = params.bndE[gj];
else
buffer[lpos+1] = U[gpos+1];
}
if(lj==1) { // south boundary
if(gj==0)
buffer[lpos-bx] = params.bndS[gi];
else
buffer[lpos-bx] = U[gpos-nx];
}
if(lj==by-2) { // north boundary
if(gj==ny-1)
buffer[lpos+bx] = params.bndN[gi];
else
buffer[lpos+bx] = U[gpos+nx];
}
buffer[lpos] = U[gpos];
__syncthreads();
S[gpos] = -(4. + params.alpha) * buffer[lpos] // central point
+ buffer[lpos-1] + buffer[lpos+1] // east and west
+ buffer[lpos-bx] + buffer[lpos+bx] // north and south
+ params.alpha * params.x_old[gpos]
+ params.dxs * buffer[lpos] * (1.0 - buffer[lpos]);
}
}
__global__
void stencil_interior(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x+1;
auto j = threadIdx.y + blockDim.y*blockIdx.y+1;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto pos = i+j*nx;
if (i<nx-1 && j<ny-1) {
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + U[pos+1] // east and west
+ U[pos-nx] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
// stencil implemented with a 1D launch configuration
__global__
void stencil_interior_1D(double* S, const double *U) {
// TODO : implement the interior stencil
// EXTRA : can you make it use shared memory?
// S(i,j) = -(4. + alpha) * U(i,j) // central point
// + U(i-1,j) + U(i+1,j) // east and west
// + U(i,j-1) + U(i,j+1) // north and south
// + alpha * x_old(i,j)
// + dxs * U(i,j) * (1.0 - U(i,j));
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j > 0 && j < ny)
{
for (int i = 1; i < nx; i++)
{
auto pos = find_pos(i, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ U[pos+1] + alpha*params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndE[j]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the WEST side
// WEST : i = 0
pos = find_pos(0, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos+1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndW[j]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = i + nx*(ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos-nx]
+ alpha*params.x_old[pos] + params.bndN[i]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the SOUTH side
// SOUTH : j = 0
pos = i;
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndS[i]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
} // namespace kernels
//enum class Boundary {north, east, south, west};
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = options.nx;
int ny = options.ny;
// calculates the linear index into an array of width nx
// from an (i,j) coordinate pair
auto idx = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
static bool is_initialized = false;
if(!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
// apply stencil to the interior grid points
// TODO: what is the purpose of the following?
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
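// calculate_grid_dim is a ceiling division: it returns the number of blocks of
// size block_dim needed to cover n grid points, rounding up so that a partial
// block is still launched (e.g. n = 130, block_dim = 16 gives (130+15)/16 = 9).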
// TODO: apply stencil to the interior grid points
dim3 block_dim(16,16);
dim3 grid_dim(
calculate_grid_dim(nx, block_dim.x),
calculate_grid_dim(ny, block_dim.y));
//#define STENCIL_SHARED
#ifdef STENCIL_SHARED
hipLaunchKernelGGL(( kernels::stencil_shared), dim3(grid_dim), dim3(block_dim), (block_dim.x+2)*(block_dim.y+2)*sizeof(double), 0, S.device_data(), U.device_data());
#else
// apply stencil to the interior grid points
hipLaunchKernelGGL(( kernels::stencil_interior), dim3(grid_dim), dim3(block_dim), 0, 0, S.device_data(), U.device_data());
// apply stencil to the interior grid points in 1D launch configuration
//auto grid_dim_int = calculate_grid_dim(ny, 64);
//kernels::stencil_interior_1D<<<grid_dim_int, 64>>>(S.device_data(), U.device_data());
// apply stencil at east-west boundary
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
hipLaunchKernelGGL(( kernels::stencil_east_west), dim3(bnd_grid_dim_y), dim3(64), 0, 0, S.device_data(), U.device_data());
// apply stencil at north-south boundary
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
hipLaunchKernelGGL(( kernels::stencil_north_south), dim3(bnd_grid_dim_x), dim3(64), 0, 0, S.device_data(), U.device_data());
// apply stencil at corners
hipLaunchKernelGGL(( kernels::stencil_corners), dim3(1), dim3(1), 0, 0, S.device_data(), U.device_data());
#endif
}
} // namespace operators | c44fbf2d3843906072f67fb8db9a1d9ba1e035e8.cu | //******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// TODO : explain what the params variable and setup_params_on_device() do
__device__
DiffusionParams params;
void setup_params_on_device(int nx, int ny, double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_check_status(
cudaMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_shared(double* S, const double *U) {
double extern __shared__ buffer[];
auto nx = params.nx;
auto ny = params.ny;
auto bx = blockDim.x+2;
auto by = blockDim.y+2;
auto gi = threadIdx.x + blockDim.x*blockIdx.x;
auto gj = threadIdx.y + blockDim.y*blockIdx.y;
auto li = threadIdx.x + 1;
auto lj = threadIdx.y + 1;
auto gpos = gi + gj * nx;
auto lpos = li + lj * bx;
if(gi<nx && gj<ny) {
// load the shared memory
if(li==1) { // west boundary
if(gi==0)
buffer[lpos-1] = params.bndW[gj];
else
buffer[lpos-1] = U[gpos-1];
}
if(li==bx-2) { // east boundary
if(gi==nx-1)
buffer[lpos+1] = params.bndE[gj];
else
buffer[lpos+1] = U[gpos+1];
}
if(lj==1) { // south boundary
if(gj==0)
buffer[lpos-bx] = params.bndS[gi];
else
buffer[lpos-bx] = U[gpos-nx];
}
if(lj==by-2) { // north boundary
if(gj==ny-1)
buffer[lpos+bx] = params.bndN[gi];
else
buffer[lpos+bx] = U[gpos+nx];
}
buffer[lpos] = U[gpos];
__syncthreads();
S[gpos] = -(4. + params.alpha) * buffer[lpos] // central point
+ buffer[lpos-1] + buffer[lpos+1] // east and west
+ buffer[lpos-bx] + buffer[lpos+bx] // north and south
+ params.alpha * params.x_old[gpos]
+ params.dxs * buffer[lpos] * (1.0 - buffer[lpos]);
}
}
__global__
void stencil_interior(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x+1;
auto j = threadIdx.y + blockDim.y*blockIdx.y+1;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto pos = i+j*nx;
if (i<nx-1 && j<ny-1) {
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + U[pos+1] // east and west
+ U[pos-nx] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
// stencil implemented with a 1D launch configuration
__global__
void stencil_interior_1D(double* S, const double *U) {
// TODO : implement the interior stencil
// EXTRA : can you make it use shared memory?
// S(i,j) = -(4. + alpha) * U(i,j) // central point
// + U(i-1,j) + U(i+1,j) // east and west
// + U(i,j-1) + U(i,j+1) // north and south
// + alpha * x_old(i,j)
// + dxs * U(i,j) * (1.0 - U(i,j));
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j > 0 && j < ny)
{
for (int i = 1; i < nx; i++)
{
auto pos = find_pos(i, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ U[pos+1] + alpha*params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndE[j]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the WEST side
// WEST : i = 0
pos = find_pos(0, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos+1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndW[j]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = i + nx*(ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos-nx]
+ alpha*params.x_old[pos] + params.bndN[i]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the SOUTH side
// SOUTH : j = 0
pos = i;
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndS[i]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
} // namespace kernels
//enum class Boundary {north, east, south, west};
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = options.nx;
int ny = options.ny;
// calculates the linear index into an array of width nx
// from an (i,j) coordinate pair
auto idx = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
static bool is_initialized = false;
if(!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
// apply stencil to the interior grid points
// TODO: what is the purpose of the following?
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
// TODO: apply stencil to the interior grid points
dim3 block_dim(16,16);
dim3 grid_dim(
calculate_grid_dim(nx, block_dim.x),
calculate_grid_dim(ny, block_dim.y));
//#define STENCIL_SHARED
#ifdef STENCIL_SHARED
kernels::stencil_shared<<<grid_dim, block_dim, (block_dim.x+2)*(block_dim.y+2)*sizeof(double)>>>(S.device_data(), U.device_data());
#else
// apply stencil to the interior grid points
kernels::stencil_interior<<<grid_dim, block_dim>>>(S.device_data(), U.device_data());
// apply stencil to the interior grid points in 1D launch configuration
//auto grid_dim_int = calculate_grid_dim(ny, 64);
//kernels::stencil_interior_1D<<<grid_dim_int, 64>>>(S.device_data(), U.device_data());
// apply stencil at east-west boundary
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
kernels::stencil_east_west<<<bnd_grid_dim_y, 64>>>(S.device_data(), U.device_data());
// apply stencil at north-south boundary
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
kernels::stencil_north_south<<<bnd_grid_dim_x, 64>>>(S.device_data(), U.device_data());
// apply stencil at corners
kernels::stencil_corners<<<1, 1>>>(S.device_data(), U.device_data());
#endif
}
} // namespace operators |
d1bdfcb77b257eee817fe7828ddd7ed0aed8b56c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void RotateROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data_x,
int* argmax_data_y) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// rois(oriented bounding box): batch_idx, xmin, ymin, xmax, ymax, theta
bottom_rois += n * 6;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = bottom_rois[1] * spatial_scale;
Dtype roi_start_h = bottom_rois[2] * spatial_scale;
Dtype roi_end_w = bottom_rois[3] * spatial_scale;
Dtype roi_end_h = bottom_rois[4] * spatial_scale;
Dtype roi_theta = bottom_rois[5] / 180.f * 3.1415926;
Dtype roi_width = max(roi_end_w - roi_start_w + 1, 1.f);
Dtype roi_height = max(roi_end_h - roi_start_h + 1, 1.f);
Dtype roi_ctr_w = roi_start_w + roi_width * 0.5;
Dtype roi_ctr_h = roi_start_h + roi_height * 0.5;
// get affine matrix
Dtype affine[2][2];
affine[0][0] = static_cast<Dtype>(cos(roi_theta));
affine[0][1] = static_cast<Dtype>(sin(roi_theta));
affine[1][0] = static_cast<Dtype>(-sin(roi_theta));
affine[1][1] = static_cast<Dtype>(cos(roi_theta));
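// `affine` is the 2x2 rotation matrix for the box angle; below, each sampling
// point (w, h) of the pooling bin is rotated about the ROI centre
// (roi_ctr_w, roi_ctr_h) before the feature map is read.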
// Force malformed ROIs to be 1x1
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
Dtype hstart = static_cast<Dtype>(ph) * bin_size_h;
Dtype wstart = static_cast<Dtype>(pw) * bin_size_w;
Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h;
Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w;
hstart = hstart + roi_start_h;
hend = hend + roi_start_h;
wstart = wstart + roi_start_w;
wend = wend + roi_start_w;
// Define an empty pooling region to be zero
Dtype maxval = 0;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx_x = -1;
int maxidx_y = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (Dtype h = hstart; h < hend; h += 1.) {
for (Dtype w = wstart; w < wend; w += 1.) {
// rotated point
Dtype r_w = (w - roi_ctr_w) * affine[0][0] + (h - roi_ctr_h) * affine[0][1] + roi_ctr_w;
Dtype r_h = (w - roi_ctr_w) * affine[1][0] + (h - roi_ctr_h) * affine[1][1] + roi_ctr_h;
// Selecting four regular locations for bilinear interpolation
int x_left = floor(r_w);
int x_right = ceil(r_w);
int y_bottom = floor(r_h);
int y_top = ceil(r_h);
int top_left_index = y_top * width + x_left;
int top_right_index = y_top * width + x_right;
int bottom_left_index = y_bottom * width + x_left;
int bottom_right_index = y_bottom * width + x_right;
bool is_top_left_in = x_left >= 0 && x_left <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_top_right_in = x_right >= 0 && x_right <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
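// Bilinear interpolation: each in-bounds neighbour is weighted by
// (1 - |dx|) * (1 - |dy|), the horizontal and vertical distances of the
// rotated sampling point (r_w, r_h) from that neighbour.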
Dtype val = 0;
if (is_top_left_in)
val += (1 - r_w + x_left) * (1 - y_top + r_h) * bottom_data[top_left_index];
if (is_top_right_in)
val += (1 - x_right + r_w) * (1 - y_top + r_h) * bottom_data[top_right_index];
if (is_bottom_left_in)
val += (1 - r_w + x_left) * (1 - r_h + y_bottom) * bottom_data[bottom_left_index];
if (is_bottom_right_in)
val += (1 - x_right + r_w) * (1 - r_h + y_bottom) * bottom_data[bottom_right_index];
if (val > maxval) {
maxval = val;
maxidx_x = static_cast<int>(r_w);
maxidx_y = static_cast<int>(r_h);
}
}
}
top_data[index] = maxval;
argmax_data_x[index] = maxidx_x;
argmax_data_y[index] = maxidx_y;
}
}
template <typename Dtype>
void RotateROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data_x = max_idx_.mutable_gpu_data();
int* argmax_data_y = max_idy_.mutable_gpu_data();
if (bottom.size() > 2) {
const Dtype* scale_pred = bottom[2]->gpu_data();
caffe_gpu_asum<Dtype>(1, scale_pred, &spatial_scale_);
}
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( RotateROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data_x, argmax_data_y);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void RotateROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data_x, const int* argmax_data_y, const int num_rois,
const Dtype spatial_scale, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = ceil(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = ceil(offset_bottom_rois[4] * spatial_scale);
Dtype roi_theta = static_cast<Dtype>(offset_bottom_rois[5] / 180.f * 3.1415926);
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int roi_ctr_w = roi_start_w + roi_width * 0.5;
int roi_ctr_h = roi_start_h + roi_height * 0.5;
// get affine matrix
Dtype affine[2][2];
affine[0][0] = static_cast<Dtype>(cos(roi_theta));
affine[0][1] = static_cast<Dtype>(sin(roi_theta));
affine[1][0] = static_cast<Dtype>(-sin(roi_theta));
affine[1][1] = static_cast<Dtype>(cos(roi_theta));
// point in polygon
      // expand the box by +/-1 so that points on the ROI boundary still test as inside
int pt_a_w = roi_start_w - 1;
int pt_a_h = roi_start_h - 1;
int pt_b_w = roi_end_w + 1;
int pt_b_h = roi_start_h - 1;
int pt_c_w = roi_end_w + 1;
int pt_c_h = roi_end_h + 1;
int pt_d_w = roi_start_w - 1;
int pt_d_h = roi_end_h + 1;
int r_pt_a_w = round(static_cast<Dtype>(pt_a_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_a_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_a_h = round(static_cast<Dtype>(pt_a_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_a_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
int r_pt_b_w = round(static_cast<Dtype>(pt_b_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_b_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_b_h = round(static_cast<Dtype>(pt_b_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_b_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
int r_pt_c_w = round(static_cast<Dtype>(pt_c_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_c_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_c_h = round(static_cast<Dtype>(pt_c_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_c_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
int r_pt_d_w = round(static_cast<Dtype>(pt_d_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_d_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_d_h = round(static_cast<Dtype>(pt_d_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_d_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
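      // Point-in-polygon test: aa..dd are 2D cross products of each rotated
      // edge with the vector to (w, h); a consistent sign across all four
      // means (w, h) lies inside the rotated rectangle.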
Dtype aa = (r_pt_b_w - r_pt_a_w) * (h - r_pt_a_h) - (r_pt_b_h - r_pt_a_h) * (w - r_pt_a_w);
Dtype bb = (r_pt_c_w - r_pt_b_w) * (h - r_pt_b_h) - (r_pt_c_h - r_pt_b_h) * (w - r_pt_b_w);
Dtype cc = (r_pt_d_w - r_pt_c_w) * (h - r_pt_c_h) - (r_pt_d_h - r_pt_c_h) * (w - r_pt_c_w);
Dtype dd = (r_pt_a_w - r_pt_d_w) * (h - r_pt_d_h) - (r_pt_a_h - r_pt_d_h) * (w - r_pt_d_w);
if (!((aa > Dtype(0.) && bb > Dtype(0.) && cc > Dtype(0.) && dd > Dtype(0.)) ||
(aa < Dtype(0.) && bb < Dtype(0.) && cc < Dtype(0.) && dd < Dtype(0.)))) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data_x = argmax_data_x + offset;
const int* offset_argmax_data_y = argmax_data_y + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
      // inverse-rotate (w, h) back into the axis-aligned ROI frame
Dtype inv_affine[2][2];
inv_affine[0][0] = static_cast<Dtype>(cos(-roi_theta));
inv_affine[0][1] = static_cast<Dtype>(sin(-roi_theta));
inv_affine[1][0] = static_cast<Dtype>(-sin(-roi_theta));
inv_affine[1][1] = static_cast<Dtype>(cos(-roi_theta));
int inv_w = round(static_cast<Dtype>(w - roi_ctr_w) * inv_affine[0][0] +
static_cast<Dtype>(h - roi_ctr_h) * inv_affine[0][1]) + roi_ctr_w;
int inv_h = round(static_cast<Dtype>(w - roi_ctr_w) * inv_affine[1][0] +
static_cast<Dtype>(h - roi_ctr_h) * inv_affine[1][1]) + roi_ctr_h;
int phstart = floor(static_cast<Dtype>(inv_h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(inv_h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(inv_w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(inv_w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
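      // Route gradient only to bins whose stored argmax coordinates floor/ceil
      // onto this (w, h), using the same bilinear weights as the forward pass.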
for (int ph = phstart; ph <= phend; ++ph) {
for (int pw = pwstart; pw <= pwend; ++pw) {
int index = ph * pooled_width + pw;
Dtype max_x = offset_argmax_data_x[index];
Dtype max_y = offset_argmax_data_y[index];
int x_left = floor(max_x);
int x_right = ceil(max_x);
int y_bottom = floor(max_y);
int y_top = ceil(max_y);
if (x_left == w && y_top == h)
gradient += (1 - max_x + x_left) * (1 - y_top + max_y) * offset_top_diff[index];
else if (x_left == w && y_bottom == h)
gradient += (1 - max_x + x_left) * (1 - max_y + y_bottom) * offset_top_diff[index];
else if (x_right == w && y_top == h)
gradient += (1 - x_right + max_x) * (1 - y_top + max_y) * offset_top_diff[index];
else if (x_right == w && y_bottom == h)
gradient += (1 - x_right + max_x) * (1 - max_y + y_bottom) * offset_top_diff[index];
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void RotateROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data_x = max_idx_.gpu_data();
const int* argmax_data_y = max_idy_.gpu_data();
if (bottom.size() > 2) {
const Dtype* scale_pred = bottom[2]->gpu_data();
caffe_gpu_asum<Dtype>(1, scale_pred, &spatial_scale_);
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( RotateROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data_x, argmax_data_y, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(RotateROIAlignLayer);
} // namespace caffe
| d1bdfcb77b257eee817fe7828ddd7ed0aed8b56c.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void RotateROIAlignForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data_x,
int* argmax_data_y) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
    // rois (oriented bounding boxes): batch_idx, xmin, ymin, xmax, ymax, theta (in degrees)
bottom_rois += n * 6;
int roi_batch_ind = bottom_rois[0];
Dtype roi_start_w = bottom_rois[1] * spatial_scale;
Dtype roi_start_h = bottom_rois[2] * spatial_scale;
Dtype roi_end_w = bottom_rois[3] * spatial_scale;
Dtype roi_end_h = bottom_rois[4] * spatial_scale;
Dtype roi_theta = bottom_rois[5] / 180.f * 3.1415926;
Dtype roi_width = max(roi_end_w - roi_start_w + 1, 1.f);
Dtype roi_height = max(roi_end_h - roi_start_h + 1, 1.f);
Dtype roi_ctr_w = roi_start_w + roi_width * 0.5;
Dtype roi_ctr_h = roi_start_h + roi_height * 0.5;
// get affine matrix
Dtype affine[2][2];
affine[0][0] = static_cast<Dtype>(cos(roi_theta));
affine[0][1] = static_cast<Dtype>(sin(roi_theta));
affine[1][0] = static_cast<Dtype>(-sin(roi_theta));
affine[1][1] = static_cast<Dtype>(cos(roi_theta));
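    // affine rotates offsets about the ROI centre by theta (degrees above),
    // mapping bin sample coordinates onto the rotated box in the feature map.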
// Force malformed ROIs to be 1x1
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
Dtype hstart = static_cast<Dtype>(ph) * bin_size_h;
Dtype wstart = static_cast<Dtype>(pw) * bin_size_w;
Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h;
Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w;
hstart = hstart + roi_start_h;
hend = hend + roi_start_h;
wstart = wstart + roi_start_w;
wend = wend + roi_start_w;
// Define an empty pooling region to be zero
Dtype maxval = 0;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx_x = -1;
int maxidx_y = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (Dtype h = hstart; h < hend; h += 1.) {
for (Dtype w = wstart; w < wend; w += 1.) {
// rotated point
Dtype r_w = (w - roi_ctr_w) * affine[0][0] + (h - roi_ctr_h) * affine[0][1] + roi_ctr_w;
Dtype r_h = (w - roi_ctr_w) * affine[1][0] + (h - roi_ctr_h) * affine[1][1] + roi_ctr_h;
// Selecting four regular locations for bilinear interpolation
int x_left = floor(r_w);
int x_right = ceil(r_w);
int y_bottom = floor(r_h);
int y_top = ceil(r_h);
int top_left_index = y_top * width + x_left;
int top_right_index = y_top * width + x_right;
int bottom_left_index = y_bottom * width + x_left;
int bottom_right_index = y_bottom * width + x_right;
bool is_top_left_in = x_left >= 0 && x_left <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_top_right_in = x_right >= 0 && x_right <= width - 1
&& y_top >= 0 && y_top <= height - 1;
bool is_bottom_left_in = x_left >= 0 && x_left <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
bool is_bottom_right_in = x_right >= 0 && x_right <= width - 1
&& y_bottom >= 0 && y_bottom <= height - 1;
Dtype val = 0;
if (is_top_left_in)
val += (1 - r_w + x_left) * (1 - y_top + r_h) * bottom_data[top_left_index];
if (is_top_right_in)
val += (1 - x_right + r_w) * (1 - y_top + r_h) * bottom_data[top_right_index];
if (is_bottom_left_in)
val += (1 - r_w + x_left) * (1 - r_h + y_bottom) * bottom_data[bottom_left_index];
if (is_bottom_right_in)
val += (1 - x_right + r_w) * (1 - r_h + y_bottom) * bottom_data[bottom_right_index];
if (val > maxval) {
maxval = val;
maxidx_x = static_cast<int>(r_w);
maxidx_y = static_cast<int>(r_h);
}
}
}
top_data[index] = maxval;
argmax_data_x[index] = maxidx_x;
argmax_data_y[index] = maxidx_y;
}
}
template <typename Dtype>
void RotateROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data_x = max_idx_.mutable_gpu_data();
int* argmax_data_y = max_idy_.mutable_gpu_data();
if (bottom.size() > 2) {
const Dtype* scale_pred = bottom[2]->gpu_data();
caffe_gpu_asum<Dtype>(1, scale_pred, &spatial_scale_);
}
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
RotateROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data_x, argmax_data_y);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void RotateROIAlignBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data_x, const int* argmax_data_y, const int num_rois,
const Dtype spatial_scale, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = ceil(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = ceil(offset_bottom_rois[4] * spatial_scale);
Dtype roi_theta = static_cast<Dtype>(offset_bottom_rois[5] / 180.f * 3.1415926);
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int roi_ctr_w = roi_start_w + roi_width * 0.5;
int roi_ctr_h = roi_start_h + roi_height * 0.5;
// get affine matrix
Dtype affine[2][2];
affine[0][0] = static_cast<Dtype>(cos(roi_theta));
affine[0][1] = static_cast<Dtype>(sin(roi_theta));
affine[1][0] = static_cast<Dtype>(-sin(roi_theta));
affine[1][1] = static_cast<Dtype>(cos(roi_theta));
// point in polygon
      // expand the box by +/-1 so that points on the ROI boundary still test as inside
int pt_a_w = roi_start_w - 1;
int pt_a_h = roi_start_h - 1;
int pt_b_w = roi_end_w + 1;
int pt_b_h = roi_start_h - 1;
int pt_c_w = roi_end_w + 1;
int pt_c_h = roi_end_h + 1;
int pt_d_w = roi_start_w - 1;
int pt_d_h = roi_end_h + 1;
int r_pt_a_w = round(static_cast<Dtype>(pt_a_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_a_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_a_h = round(static_cast<Dtype>(pt_a_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_a_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
int r_pt_b_w = round(static_cast<Dtype>(pt_b_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_b_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_b_h = round(static_cast<Dtype>(pt_b_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_b_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
int r_pt_c_w = round(static_cast<Dtype>(pt_c_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_c_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_c_h = round(static_cast<Dtype>(pt_c_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_c_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
int r_pt_d_w = round(static_cast<Dtype>(pt_d_w - roi_ctr_w) * affine[0][0] +
static_cast<Dtype>(pt_d_h - roi_ctr_h) * affine[0][1]) + roi_ctr_w;
int r_pt_d_h = round(static_cast<Dtype>(pt_d_w - roi_ctr_w) * affine[1][0] +
static_cast<Dtype>(pt_d_h - roi_ctr_h) * affine[1][1]) + roi_ctr_h;
Dtype aa = (r_pt_b_w - r_pt_a_w) * (h - r_pt_a_h) - (r_pt_b_h - r_pt_a_h) * (w - r_pt_a_w);
Dtype bb = (r_pt_c_w - r_pt_b_w) * (h - r_pt_b_h) - (r_pt_c_h - r_pt_b_h) * (w - r_pt_b_w);
Dtype cc = (r_pt_d_w - r_pt_c_w) * (h - r_pt_c_h) - (r_pt_d_h - r_pt_c_h) * (w - r_pt_c_w);
Dtype dd = (r_pt_a_w - r_pt_d_w) * (h - r_pt_d_h) - (r_pt_a_h - r_pt_d_h) * (w - r_pt_d_w);
if (!((aa > Dtype(0.) && bb > Dtype(0.) && cc > Dtype(0.) && dd > Dtype(0.)) ||
(aa < Dtype(0.) && bb < Dtype(0.) && cc < Dtype(0.) && dd < Dtype(0.)))) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data_x = argmax_data_x + offset;
const int* offset_argmax_data_y = argmax_data_y + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
      // inverse-rotate (w, h) back into the axis-aligned ROI frame
Dtype inv_affine[2][2];
inv_affine[0][0] = static_cast<Dtype>(cos(-roi_theta));
inv_affine[0][1] = static_cast<Dtype>(sin(-roi_theta));
inv_affine[1][0] = static_cast<Dtype>(-sin(-roi_theta));
inv_affine[1][1] = static_cast<Dtype>(cos(-roi_theta));
int inv_w = round(static_cast<Dtype>(w - roi_ctr_w) * inv_affine[0][0] +
static_cast<Dtype>(h - roi_ctr_h) * inv_affine[0][1]) + roi_ctr_w;
int inv_h = round(static_cast<Dtype>(w - roi_ctr_w) * inv_affine[1][0] +
static_cast<Dtype>(h - roi_ctr_h) * inv_affine[1][1]) + roi_ctr_h;
int phstart = floor(static_cast<Dtype>(inv_h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(inv_h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(inv_w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(inv_w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph <= phend; ++ph) {
for (int pw = pwstart; pw <= pwend; ++pw) {
int index = ph * pooled_width + pw;
Dtype max_x = offset_argmax_data_x[index];
Dtype max_y = offset_argmax_data_y[index];
int x_left = floor(max_x);
int x_right = ceil(max_x);
int y_bottom = floor(max_y);
int y_top = ceil(max_y);
if (x_left == w && y_top == h)
gradient += (1 - max_x + x_left) * (1 - y_top + max_y) * offset_top_diff[index];
else if (x_left == w && y_bottom == h)
gradient += (1 - max_x + x_left) * (1 - max_y + y_bottom) * offset_top_diff[index];
else if (x_right == w && y_top == h)
gradient += (1 - x_right + max_x) * (1 - y_top + max_y) * offset_top_diff[index];
else if (x_right == w && y_bottom == h)
gradient += (1 - x_right + max_x) * (1 - max_y + y_bottom) * offset_top_diff[index];
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void RotateROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data_x = max_idx_.gpu_data();
const int* argmax_data_y = max_idy_.gpu_data();
if (bottom.size() > 2) {
const Dtype* scale_pred = bottom[2]->gpu_data();
caffe_gpu_asum<Dtype>(1, scale_pred, &spatial_scale_);
}
// NOLINT_NEXT_LINE(whitespace/operators)
RotateROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data_x, argmax_data_y, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(RotateROIAlignLayer);
} // namespace caffe
|
f996cd25a64b930a9c95fdf8da1fad7c13eedf34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const int device_id, const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
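  // cuBLAS/hipBLAS store matrices column-major; swapping A and B (and M and N)
  // below computes the row-major product C = A * B without explicit transposes.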
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(device_id), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const int device_id, const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(device_id), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const int device_id, const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(device_id), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const int device_id, const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(device_id), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int device_id, const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(device_id), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int device_id, const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(device_id), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int device_id, const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(device_id), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int device_id, const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(device_id), N, &alpha, X, 1));
}
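// axpby computes Y = alpha * X + beta * Y by first scaling Y and then adding alpha * X.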
template <>
void caffe_gpu_axpby<float>(const int device_id, const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(device_id, N, beta, Y);
caffe_gpu_axpy<float>(device_id, N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int device_id, const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(device_id, N, beta, Y);
caffe_gpu_axpy<double>(device_id, N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int device_id, const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(device_id), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int device_id, const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(device_id), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int device_id, const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(device_id), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int device_id, const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(device_id), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int device_id, const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(device_id), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(device_id), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int device_id, const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(device_id), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(device_id), n, &alpha, y, 1));
}
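// The element-wise kernels below use Caffe's CUDA_KERNEL_LOOP macro, a
// grid-stride loop that lets a fixed-size launch cover all n elements.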
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
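// Hamming distance kernels: XOR the integer-cast inputs element-wise and count
// set bits with __popc/__popcll; the host side then reduces the counts via thrust.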
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
void caffe_gpu_rng_uniform(const int device_id, const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(device_id), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int device_id, const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(device_id), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(device_id, n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int device_id, const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(device_id), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(device_id, n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int device_id, const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(device_id), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int device_id, const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(device_id), r, n, mu, sigma));
}
} // namespace caffe
| f996cd25a64b930a9c95fdf8da1fad7c13eedf34.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const int device_id, const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(device_id), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const int device_id, const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(device_id), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const int device_id, const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(device_id), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const int device_id, const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(device_id), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int device_id, const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(device_id), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int device_id, const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(device_id), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int device_id, const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(device_id), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int device_id, const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(device_id), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int device_id, const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(device_id, N, beta, Y);
caffe_gpu_axpy<float>(device_id, N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int device_id, const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(device_id, N, beta, Y);
caffe_gpu_axpy<double>(device_id, N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int device_id, const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(device_id), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int device_id, const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(device_id), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int device_id, const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(device_id), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int device_id, const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(device_id), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int device_id, const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(device_id), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(device_id), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int device_id, const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(device_id), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(device_id), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
void caffe_gpu_rng_uniform(const int device_id, const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(device_id), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int device_id, const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(device_id), r, n));
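  // curandGenerateUniform draws from (0, 1]; the scale and shift below map the
  // samples onto (a, b].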
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(device_id, n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int device_id, const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(device_id), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(device_id, n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int device_id, const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(device_id), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int device_id, const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(device_id), r, n, mu, sigma));
}
} // namespace caffe
|
6445ec7c0637151936be2dc00a5a0eb0d9b9d62b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matrix_math.h"
float *dev_mat_A;
float *dev_mat_B;
float *dev_mat_C;
hipEvent_t start, stop;
void CUDA_matrix_math::initialize() {
hipMalloc((void**)&dev_mat_A, sizeof(float) * 25);
hipMalloc((void**)&dev_mat_B, sizeof(float) * 25);
hipMalloc((void**)&dev_mat_C, sizeof(float) * 25);
}
void CUDA_matrix_math::teardown() {
hipFree(dev_mat_A);
hipFree(dev_mat_B);
hipFree(dev_mat_C);
}
__global__ void mat_add(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 4 || j > 4) return;
int index = i + j * 5;
C[index] = A[index] + B[index];
}
__global__ void mat_sub(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 4 || j > 4) return;
int index = i + j * 5;
C[index] = A[index] - B[index];
}
__global__ void mat_mul(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 4 || j > 4) return;
int index = i + j * 5;
float dot_product = 0.0f;
// 0 1 2 3 4
// 5 6 7 8 9
// 10 11 12 13 14
// 15 16 17 18 19
// 20 21 22 23 24
int col_index = index % 5;
int row_index = (index / 5) * 5;
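  // Walk A across row (index / 5) with stride 1 and B down column (index % 5)
  // with stride 5; both matrices are stored row-major.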
for (int k = 0; k < 5; k++) {
dot_product += A[row_index] * B[col_index];
col_index += 5;
row_index += 1;
}
C[index] = dot_product;
}
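// Timing helpers: events bracket each kernel launch and the elapsed time
// between them is reported in milliseconds.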
static void setup_timer_events() {
hipEventCreate(&start);
hipEventCreate(&stop);
}
static float teardown_timer_events() {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return milliseconds;
}
void CUDA_matrix_math::cuda_mat_add(float *A, float *B, float *C) {
hipMemcpy(dev_mat_A, A, 25 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_mat_B, B, 25 * sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(5, 5);
dim3 dimGrid(1, 1);
setup_timer_events();
hipEventRecord(start);
hipLaunchKernelGGL(( mat_add) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dev_mat_A, dev_mat_B, dev_mat_C);
hipEventRecord(stop);
float time = teardown_timer_events();
printf("Addition operation took about %f milliseconds.\n", time);
hipMemcpy(C, dev_mat_C, 25 * sizeof(float), hipMemcpyDeviceToHost);
}
void CUDA_matrix_math::cuda_mat_sub(float *A, float *B, float *C) {
hipMemcpy(dev_mat_A, A, 25 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_mat_B, B, 25 * sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(5, 5);
dim3 dimGrid(1, 1);
setup_timer_events();
hipEventRecord(start);
hipLaunchKernelGGL(( mat_sub) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dev_mat_A, dev_mat_B, dev_mat_C);
hipEventRecord(stop);
float time = teardown_timer_events();
printf("Subtraction operation took about %f milliseconds.\n", time);
hipMemcpy(C, dev_mat_C, 25 * sizeof(float), hipMemcpyDeviceToHost);
}
void CUDA_matrix_math::cuda_mat_mul(float *A, float *B, float *C) {
hipMemcpy(dev_mat_A, A, 25 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_mat_B, B, 25 * sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(5, 5);
dim3 dimGrid(1, 1);
setup_timer_events();
hipEventRecord(start);
hipLaunchKernelGGL(( mat_mul) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dev_mat_A, dev_mat_B, dev_mat_C);
hipEventRecord(stop);
float time = teardown_timer_events();
printf("Multiplication operation took about %f milliseconds.\n", time);
hipMemcpy(C, dev_mat_C, 25 * sizeof(float), hipMemcpyDeviceToHost);
} | 6445ec7c0637151936be2dc00a5a0eb0d9b9d62b.cu | #include "matrix_math.h"
float *dev_mat_A;
float *dev_mat_B;
float *dev_mat_C;
cudaEvent_t start, stop;
void CUDA_matrix_math::initialize() {
cudaMalloc((void**)&dev_mat_A, sizeof(float) * 25);
cudaMalloc((void**)&dev_mat_B, sizeof(float) * 25);
cudaMalloc((void**)&dev_mat_C, sizeof(float) * 25);
}
void CUDA_matrix_math::teardown() {
cudaFree(dev_mat_A);
cudaFree(dev_mat_B);
cudaFree(dev_mat_C);
}
__global__ void mat_add(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 4 || j > 4) return;
int index = i + j * 5;
C[index] = A[index] + B[index];
}
__global__ void mat_sub(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 4 || j > 4) return;
int index = i + j * 5;
C[index] = A[index] - B[index];
}
__global__ void mat_mul(float *A, float *B, float *C) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 4 || j > 4) return;
int index = i + j * 5;
float dot_product = 0.0f;
// 0 1 2 3 4
// 5 6 7 8 9
// 10 11 12 13 14
// 15 16 17 18 19
// 20 21 22 23 24
int col_index = index % 5;
int row_index = (index / 5) * 5;
for (int k = 0; k < 5; k++) {
dot_product += A[row_index] * B[col_index];
col_index += 5;
row_index += 1;
}
C[index] = dot_product;
}
static void setup_timer_events() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
static float teardown_timer_events() {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return milliseconds;
}
void CUDA_matrix_math::cuda_mat_add(float *A, float *B, float *C) {
cudaMemcpy(dev_mat_A, A, 25 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mat_B, B, 25 * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(5, 5);
dim3 dimGrid(1, 1);
setup_timer_events();
cudaEventRecord(start);
mat_add <<<dimGrid, dimBlock >>>(dev_mat_A, dev_mat_B, dev_mat_C);
cudaEventRecord(stop);
float time = teardown_timer_events();
printf("Addition operation took about %f milliseconds.\n", time);
cudaMemcpy(C, dev_mat_C, 25 * sizeof(float), cudaMemcpyDeviceToHost);
}
void CUDA_matrix_math::cuda_mat_sub(float *A, float *B, float *C) {
cudaMemcpy(dev_mat_A, A, 25 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mat_B, B, 25 * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(5, 5);
dim3 dimGrid(1, 1);
setup_timer_events();
cudaEventRecord(start);
mat_sub <<<dimGrid, dimBlock >>>(dev_mat_A, dev_mat_B, dev_mat_C);
cudaEventRecord(stop);
float time = teardown_timer_events();
printf("Subtraction operation took about %f milliseconds.\n", time);
cudaMemcpy(C, dev_mat_C, 25 * sizeof(float), cudaMemcpyDeviceToHost);
}
void CUDA_matrix_math::cuda_mat_mul(float *A, float *B, float *C) {
cudaMemcpy(dev_mat_A, A, 25 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mat_B, B, 25 * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(5, 5);
dim3 dimGrid(1, 1);
setup_timer_events();
cudaEventRecord(start);
mat_mul <<<dimGrid, dimBlock >>>(dev_mat_A, dev_mat_B, dev_mat_C);
cudaEventRecord(stop);
float time = teardown_timer_events();
printf("Multiplication operation took about %f milliseconds.\n", time);
cudaMemcpy(C, dev_mat_C, 25 * sizeof(float), cudaMemcpyDeviceToHost);
} |
54bd2a3bb0b2b135c3d196c3dae2463b8142f153.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "test_utils.h"
#include "linalg/multiply.h"
#include "random/rng.h"
#include "unary_op.h"
namespace MLCommon {
namespace LinAlg {
template<typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<UnaryOpInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
naiveScale(out_ref, in, params.scalar, len);
multiplyScalar(out, in, params.scalar, len);
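    // naiveScale fills out_ref with the reference result; the Result test below
    // compares multiplyScalar's output against it within params.tolerance.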
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
UnaryOpInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<UnaryOpInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestF,
::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<UnaryOpInputs<double>> inputsd = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
| 54bd2a3bb0b2b135c3d196c3dae2463b8142f153.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "test_utils.h"
#include "linalg/multiply.h"
#include "random/rng.h"
#include "unary_op.h"
namespace MLCommon {
namespace LinAlg {
template<typename T>
class MultiplyTest : public ::testing::TestWithParam<UnaryOpInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<UnaryOpInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
naiveScale(out_ref, in, params.scalar, len);
multiplyScalar(out, in, params.scalar, len);
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
UnaryOpInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<UnaryOpInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
typedef MultiplyTest<float> MultiplyTestF;
TEST_P(MultiplyTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestF,
::testing::ValuesIn(inputsf));
typedef MultiplyTest<double> MultiplyTestD;
const std::vector<UnaryOpInputs<double>> inputsd = {
{0.000001f, 1024 * 1024, 2.f, 1234ULL}};
TEST_P(MultiplyTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MultiplyTests, MultiplyTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
b08335a0702614d877512ea72df8cd319c030be3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_latency_setup_kernel(int *buffer, size_t delta, size_t elements)
{
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
size_t tgt = ofs + delta;
while(tgt >= elements)
tgt -= elements;
buffer[ofs] = tgt;
ofs += step;
}
} | b08335a0702614d877512ea72df8cd319c030be3.cu | #include "includes.h"
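// Sets up a pointer-chasing index buffer for latency measurements: buffer[ofs] = (ofs + delta) mod elements.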
__global__ void gpu_latency_setup_kernel(int *buffer, size_t delta, size_t elements)
{
size_t ofs = blockIdx.x * blockDim.x + threadIdx.x;
size_t step = blockDim.x * gridDim.x;
while(ofs < elements) {
size_t tgt = ofs + delta;
while(tgt >= elements)
tgt -= elements;
buffer[ofs] = tgt;
ofs += step;
}
} |
1216cd8c148be491f595dd06beb30ade3a478742.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <raft/random/rng.cuh>
#include <raft/mr/device/allocator.hpp>
#include <sparse/convert/csr.cuh>
#include <sparse/coo.cuh>
#include <iostream>
namespace raft {
namespace sparse {
/**************************** sorted COO to CSR ****************************/
template <typename T>
struct SparseConvertCSRInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const SparseConvertCSRInputs<T> &dims) {
return os;
}
template <typename T>
class SparseConvertCSRTest
: public ::testing::TestWithParam<SparseConvertCSRInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseConvertCSRInputs<T> params;
};
const std::vector<SparseConvertCSRInputs<float>> inputsf = {
{5, 10, 5, 1234ULL}};
typedef SparseConvertCSRTest<float> SortedCOOToCSR;
TEST_P(SortedCOOToCSR, Result) {
hipStream_t stream;
hipStreamCreate(&stream);
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
int nnz = 8;
int *in, *out, *exp;
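// Sorted COO row indices {0,0,1,1,2,2,3,3} should convert to CSR row offsets {0,2,4,6}.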
int *in_h = new int[nnz]{0, 0, 1, 1, 2, 2, 3, 3};
int *exp_h = new int[4]{0, 2, 4, 6};
raft::allocate(in, nnz, true);
raft::allocate(exp, 4, true);
raft::allocate(out, 4, true);
raft::update_device(in, in_h, nnz, stream);
raft::update_device(exp, exp_h, 4, stream);
convert::sorted_coo_to_csr<int>(in, nnz, out, 4, alloc, stream);
ASSERT_TRUE(raft::devArrMatch<int>(out, exp, 4, raft::Compare<int>()));
hipStreamDestroy(stream);
delete[] in_h;
delete[] exp_h;
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(exp));
CUDA_CHECK(hipFree(out));
}
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR,
::testing::ValuesIn(inputsf));
/******************************** adj graph ********************************/
template <typename Index_>
struct CSRAdjGraphInputs {
Index_ n_rows;
Index_ n_cols;
std::vector<Index_> row_ind;
std::vector<uint8_t> adj; // To avoid vector<bool> optimization
std::vector<Index_> verify;
};
template <typename Index_>
class CSRAdjGraphTest
: public ::testing::TestWithParam<CSRAdjGraphInputs<Index_>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<CSRAdjGraphInputs<Index_>>::GetParam();
hipStreamCreate(&stream);
nnz = params.verify.size();
raft::allocate(row_ind, params.n_rows);
raft::allocate(adj, params.n_rows * params.n_cols);
raft::allocate(result, nnz, true);
raft::allocate(verify, nnz);
}
void Run() {
raft::update_device(row_ind, params.row_ind.data(), params.n_rows, stream);
raft::update_device(adj, reinterpret_cast<bool *>(params.adj.data()),
params.n_rows * params.n_cols, stream);
raft::update_device(verify, params.verify.data(), nnz, stream);
convert::csr_adj_graph_batched<Index_, 32>(
row_ind, params.n_cols, nnz, params.n_rows, adj, result, stream);
ASSERT_TRUE(
raft::devArrMatch<Index_>(verify, result, nnz, raft::Compare<Index_>()));
}
void TearDown() override {
CUDA_CHECK(hipFree(row_ind));
CUDA_CHECK(hipFree(adj));
CUDA_CHECK(hipFree(verify));
CUDA_CHECK(hipFree(result));
hipStreamDestroy(stream);
}
protected:
CSRAdjGraphInputs<Index_> params;
hipStream_t stream;
Index_ nnz;
Index_ *row_ind, *result, *verify;
bool *adj;
};
using CSRAdjGraphTestI = CSRAdjGraphTest<int>;
TEST_P(CSRAdjGraphTestI, Result) { Run(); }
using CSRAdjGraphTestL = CSRAdjGraphTest<int64_t>;
TEST_P(CSRAdjGraphTestL, Result) { Run(); }
const std::vector<CSRAdjGraphInputs<int>> csradjgraph_inputs_i = {
{3,
6,
{0, 3, 6},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 1, 2, 0, 1, 2, 0, 1, 2}},
};
const std::vector<CSRAdjGraphInputs<int64_t>> csradjgraph_inputs_l = {
{3,
6,
{0, 3, 6},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 1, 2, 0, 1, 2, 0, 1, 2}},
};
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, CSRAdjGraphTestI,
::testing::ValuesIn(csradjgraph_inputs_i));
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, CSRAdjGraphTestL,
::testing::ValuesIn(csradjgraph_inputs_l));
} // namespace sparse
} // namespace raft
| 1216cd8c148be491f595dd06beb30ade3a478742.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <raft/random/rng.cuh>
#include <raft/mr/device/allocator.hpp>
#include <sparse/convert/csr.cuh>
#include <sparse/coo.cuh>
#include <iostream>
namespace raft {
namespace sparse {
/**************************** sorted COO to CSR ****************************/
template <typename T>
struct SparseConvertCSRInputs {
int m, n, nnz;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os,
const SparseConvertCSRInputs<T> &dims) {
return os;
}
template <typename T>
class SparseConvertCSRTest
: public ::testing::TestWithParam<SparseConvertCSRInputs<T>> {
protected:
void SetUp() override {}
void TearDown() override {}
protected:
SparseConvertCSRInputs<T> params;
};
const std::vector<SparseConvertCSRInputs<float>> inputsf = {
{5, 10, 5, 1234ULL}};
typedef SparseConvertCSRTest<float> SortedCOOToCSR;
TEST_P(SortedCOOToCSR, Result) {
cudaStream_t stream;
cudaStreamCreate(&stream);
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
int nnz = 8;
int *in, *out, *exp;
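// Sorted COO row indices {0,0,1,1,2,2,3,3} should convert to CSR row offsets {0,2,4,6}.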
int *in_h = new int[nnz]{0, 0, 1, 1, 2, 2, 3, 3};
int *exp_h = new int[4]{0, 2, 4, 6};
raft::allocate(in, nnz, true);
raft::allocate(exp, 4, true);
raft::allocate(out, 4, true);
raft::update_device(in, in_h, nnz, stream);
raft::update_device(exp, exp_h, 4, stream);
convert::sorted_coo_to_csr<int>(in, nnz, out, 4, alloc, stream);
ASSERT_TRUE(raft::devArrMatch<int>(out, exp, 4, raft::Compare<int>()));
cudaStreamDestroy(stream);
delete[] in_h;
delete[] exp_h;
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(exp));
CUDA_CHECK(cudaFree(out));
}
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR,
::testing::ValuesIn(inputsf));
/******************************** adj graph ********************************/
template <typename Index_>
struct CSRAdjGraphInputs {
Index_ n_rows;
Index_ n_cols;
std::vector<Index_> row_ind;
std::vector<uint8_t> adj; // To avoid vector<bool> optimization
std::vector<Index_> verify;
};
template <typename Index_>
class CSRAdjGraphTest
: public ::testing::TestWithParam<CSRAdjGraphInputs<Index_>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<CSRAdjGraphInputs<Index_>>::GetParam();
cudaStreamCreate(&stream);
nnz = params.verify.size();
raft::allocate(row_ind, params.n_rows);
raft::allocate(adj, params.n_rows * params.n_cols);
raft::allocate(result, nnz, true);
raft::allocate(verify, nnz);
}
void Run() {
raft::update_device(row_ind, params.row_ind.data(), params.n_rows, stream);
raft::update_device(adj, reinterpret_cast<bool *>(params.adj.data()),
params.n_rows * params.n_cols, stream);
raft::update_device(verify, params.verify.data(), nnz, stream);
convert::csr_adj_graph_batched<Index_, 32>(
row_ind, params.n_cols, nnz, params.n_rows, adj, result, stream);
ASSERT_TRUE(
raft::devArrMatch<Index_>(verify, result, nnz, raft::Compare<Index_>()));
}
void TearDown() override {
CUDA_CHECK(cudaFree(row_ind));
CUDA_CHECK(cudaFree(adj));
CUDA_CHECK(cudaFree(verify));
CUDA_CHECK(cudaFree(result));
cudaStreamDestroy(stream);
}
protected:
CSRAdjGraphInputs<Index_> params;
cudaStream_t stream;
Index_ nnz;
Index_ *row_ind, *result, *verify;
bool *adj;
};
using CSRAdjGraphTestI = CSRAdjGraphTest<int>;
TEST_P(CSRAdjGraphTestI, Result) { Run(); }
using CSRAdjGraphTestL = CSRAdjGraphTest<int64_t>;
TEST_P(CSRAdjGraphTestL, Result) { Run(); }
const std::vector<CSRAdjGraphInputs<int>> csradjgraph_inputs_i = {
{3,
6,
{0, 3, 6},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 1, 2, 0, 1, 2, 0, 1, 2}},
};
const std::vector<CSRAdjGraphInputs<int64_t>> csradjgraph_inputs_l = {
{3,
6,
{0, 3, 6},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 1, 2, 0, 1, 2, 0, 1, 2}},
};
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, CSRAdjGraphTestI,
::testing::ValuesIn(csradjgraph_inputs_i));
INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, CSRAdjGraphTestL,
::testing::ValuesIn(csradjgraph_inputs_l));
} // namespace sparse
} // namespace raft
|
ee447c48b8a138e91ea56a5b6b199ab97f2cd6c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <map>
#include <chrono>
#include <math.h>
#include <iostream>
using namespace std;
// Defines from https://gist.github.com/Tener/803377/38562ed70bd627dac09946222d1005d7d4e95e50
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
exit( EXIT_FAILURE );}} while(0)
#define CURAND_CALL(x) do { if((x) != HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
exit( EXIT_FAILURE );}} while(0)
// deprecated... Just to try my own generator, but there are way better ones
__global__
void gpuRandom(int* x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
x[index] = 17 * index % 23;
}
class Solver
{
protected:
long seed;
long numElems;
virtual ostream& print(ostream& os) = 0;
public:
Solver(long n, long s) { numElems = n; seed = s; }
inline virtual ~Solver() {};
virtual void createUnordered() = 0;
virtual void order() = 0;
inline virtual void deleteData() = 0;
inline friend ostream& operator<<(ostream& os, Solver& dt)
{
if (dt.numElems > 1000)
{
os << "Way too many to show" << std::endl;
return os;
}
else
{
return dt.print(os);
}
};
};
template<typename T>
class CPUSolver: public Solver
{
T* array;
protected:
virtual ostream& print(ostream& os);
public:
inline CPUSolver<T>(long n, long s = 129229): Solver(n,s) { array = new T[numElems]; }
inline virtual ~CPUSolver() { };
inline virtual void deleteData() { delete[] array; };
virtual void createUnordered();
virtual void order();
};
template<typename T>
void CPUSolver<T>::createUnordered()
{
// The formula might break with too big numbers...
for (long i = 0; i < numElems; i++)
{
array[i] = (T) rand();
}
}
int cmpfunc(const void* a, const void* b) {
return (*(int*)a - *(int*)b);
}
// ONLY WORKS WITH INT!!!
template<typename T>
void CPUSolver<T>::order()
{
qsort(array, numElems, sizeof(int), cmpfunc);
}
template<typename T>
ostream& CPUSolver<T>::print(ostream& os)
{
for (long i = 0; i < numElems; i++)
{
os << array[i] << ", ";
}
os << std::endl;
return os;
}
template<typename T>
class GPUSolver : public Solver
{
T* x;
protected:
virtual ostream& print(ostream& os);
public:
inline GPUSolver<T>(long n, long s = 129229) : Solver(n, s) {
CUDA_CALL(hipMallocManaged(&x, numElems * sizeof(T)));
}
// Can't free in the destructor, since there are objects created with a copy constructor
// that can't free the info in this way... :(
inline virtual ~GPUSolver<T>() { };
inline virtual void deleteData()
{
if (x != NULL) { CUDA_CALL(hipFree(x)); x = NULL; }
};
virtual void createUnordered();
virtual void order();
};
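// Forward declarations; the bitonic sort implementations are assumed to live in a separate source file.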
void bitonic_sort(float* values, long numElems);
void bitonic_sort(int* values, long numElems);
template<typename T>
void GPUSolver<T>::createUnordered()
{
//int blockSize = 256;
//int numBlocks = (numElems + blockSize - 1) / blockSize;
//gpuRandom << <numBlocks, blockSize >> > ( x );
//// Wait for GPU to finish before accessing on host
//hipDeviceSynchronize();
hiprandGenerator_t gen;
/* Create pseudo-random number generator */
CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
/* Generate n floats on device */
CURAND_CALL(hiprandGenerate(gen, (unsigned int*)x, numElems));
// Wait for GPU to finish before accessing on host
CUDA_CALL(hipDeviceSynchronize());
/* Cleanup */
CURAND_CALL(hiprandDestroyGenerator(gen));
}
// ONLY WORKS WITH FLOAT!!!
template<typename T>
void GPUSolver<T>::order()
{
bitonic_sort(x,numElems);
}
template<typename T>
ostream& GPUSolver<T>::print(ostream& os)
{
for (long i = 0; i < numElems; i++)
{
os << x[i] << ", ";
}
os << std::endl;
return os;
}
// Services
// Create a new set of numbers of type T. It returns the id of such set
// ONLY TESTED with int!!!
template<typename T>
long createSet(bool isGpu, long nElems, long seed = 129229);
// returns the ids of all sets available
// returns the metadata of the set of the given id
// returns a subset of the set. The parameters indicate which part
// order the given set
void orderSet(long id);
// deletes everything
void deleteAll();
struct MetadataSolver {
long id;
long numElems;
Solver* solver;
double nanosCreate;
double nanosOrder;
MetadataSolver() { id = -1; numElems = 0; solver = NULL; nanosCreate = -1; }
inline void deleteSolverData()
{
if (solver != NULL) { solver->deleteData(); solver = NULL; }
}
MetadataSolver(long i, long n, Solver* s, double d) { id = i; numElems = n; solver = s; nanosCreate = d; }
MetadataSolver(const MetadataSolver& m) { id = m.id; numElems = m.numElems; solver = m.solver; nanosCreate = m.nanosCreate; }
} ;
static long numSetIDs = 0;
typedef map<long, MetadataSolver> TupleMap;
static TupleMap sets;
int main1(int argc, char* argv[])
{
Solver* s = NULL;
long id;
// just for the GPU profiler to have something to report
float* tt;
hipMallocManaged(&tt, 5 * sizeof(float));
long numElems = 256 * 256;
if (argc <= 1)
{
id = createSet<int>(false, numElems, 129229);
s = sets[id].solver;
orderSet(id);
//s = new CPUSolver<int>(100, 129229);
}
else
{
id = createSet<int>(true, numElems, 129229);
s = sets[id].solver;
orderSet(id);
//s = new GPUSolver<int>(100, 129229);
}
cout << (*s);
return 0;
}
int mainInt(int argc, char* argv[])
{
long numElems = 256;
int exponente;
const int EXIT = 10;
int opcion = EXIT;
long id;
// just for the GPU profiler to have something to report
float* tt;
hipMallocManaged(&tt, 5 * sizeof(float));
do{
cout << "Escoja una opcin: " << endl;
cout << " 1. Crear y ordenar conjunto en CPU " << endl;
cout << " 2. Crear y ordenar conjunto en GPU " << endl;
cout << " 3. Borrar todo" << endl;
cout << " " << EXIT << ". Salir " << endl;
cin >> opcion;
if (opcion == EXIT)
{
break;
}
else if (opcion == 3)
{
deleteAll();
cout << "Todo Borrado" << endl;
}
else if( opcion == 1 || opcion == 2)
{
cout << "exponente de 256?" << endl;
cin >> exponente;
numElems = 256 * exponente;
if (opcion == 1 )
{
id = createSet<int>(false, numElems, 129229);
orderSet(id);
}
else if (opcion == 2 )
{
id = createSet<int>(true, numElems, 129229);
orderSet(id);
}
}
} while (opcion != EXIT);
return 0;
}
int main(int argc, char* argv[])
{
return mainInt(argc, argv);
}
template<typename T>
long createSet(bool isGpu, long nElems, long seed)
{
Solver* s = NULL;
long id = ++numSetIDs;
auto start = std::chrono::high_resolution_clock::now();
if (isGpu)
{
s = new GPUSolver<T>(nElems, seed);
}
else
{
s = new CPUSolver<T>(nElems, seed);
}
// create the unordered set
s->createUnordered();
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::nano> elapsed = finish - start;
std::cout << "Elapsed Time numElems " << nElems << " creation: " << fixed << elapsed.count() << " nanoseconds, typeid: " << typeid(elapsed.count()).name() << std::endl;
sets[id] = MetadataSolver(id, nElems, s, elapsed.count());
return id;
}
void orderSet(long id)
{
Solver* s = sets[id].solver;
if(s!= NULL)
{
auto start = std::chrono::high_resolution_clock::now();
s->order();
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::nano> elapsed = finish - start;
std::cout << "Elapsed Time numElems " << sets[id].numElems << " order: " << fixed << elapsed.count() << " nanoseconds, typeid: " << typeid(elapsed.count()).name() << std::endl;
sets[id].nanosOrder = elapsed.count();
}
}
void deleteAll()
{
for (auto it = sets.begin(); it != sets.end(); ) {
(it->second).deleteSolverData();
it = sets.erase(it);
}
}
| ee447c48b8a138e91ea56a5b6b199ab97f2cd6c9.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <curand.h>
#include <map>
#include <chrono>
#include <math.h>
#include <iostream>
using namespace std;
// Defines from https://gist.github.com/Tener/803377/38562ed70bd627dac09946222d1005d7d4e95e50
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
exit( EXIT_FAILURE );}} while(0)
#define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
exit( EXIT_FAILURE );}} while(0)
// deprecatd... Just to try my own generator, but there are way better ones
__global__
void gpuRandom(int* x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
// int stride = blockDim.x * gridDim.x;
x[index] = 17 * index % 23;
}
class Solver
{
protected:
long seed;
long numElems;
virtual ostream& print(ostream& os) = 0;
public:
Solver(long n, long s) { numElems = n; seed = s; }
inline virtual ~Solver() {};
virtual void createUnordered() = 0;
virtual void order() = 0;
inline virtual void deleteData() = 0;
inline friend ostream& operator<<(ostream& os, Solver& dt)
{
if (dt.numElems > 1000)
{
os << "Way too many to show" << std::endl;
return os;
}
else
{
return dt.print(os);
}
};
};
template<typename T>
class CPUSolver: public Solver
{
T* array;
protected:
virtual ostream& print(ostream& os);
public:
inline CPUSolver<T>(long n, long s = 129229): Solver(n,s) { array = new T[numElems]; }
inline virtual ~CPUSolver() { };
inline virtual void deleteData() { delete[] array; };
virtual void createUnordered();
virtual void order();
};
template<typename T>
void CPUSolver<T>::createUnordered()
{
// The formula might break with too big numbers...
for (long i = 0; i < numElems; i++)
{
array[i] = (T) rand();
}
}
int cmpfunc(const void* a, const void* b) {
return (*(int*)a - *(int*)b);
}
// ONLY WORKS WITH INT!!!
template<typename T>
void CPUSolver<T>::order()
{
qsort(array, numElems, sizeof(int), cmpfunc);
}
template<typename T>
ostream& CPUSolver<T>::print(ostream& os)
{
for (long i = 0; i < numElems; i++)
{
os << array[i] << ", ";
}
os << std::endl;
return os;
}
template<typename T>
class GPUSolver : public Solver
{
T* x;
protected:
virtual ostream& print(ostream& os);
public:
inline GPUSolver<T>(long n, long s = 129229) : Solver(n, s) {
CUDA_CALL(cudaMallocManaged(&x, numElems * sizeof(T)));
}
// Can't free in the destructor, since there are objects created with a copy constructor
// that can't free the info in this way... :(
inline virtual ~GPUSolver<T>() { };
inline virtual void deleteData()
{
if (x != NULL) { CUDA_CALL(cudaFree(x)); x = NULL; }
};
virtual void createUnordered();
virtual void order();
};
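// Forward declarations; the bitonic sort implementations are assumed to live in a separate source file.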
void bitonic_sort(float* values, long numElems);
void bitonic_sort(int* values, long numElems);
template<typename T>
void GPUSolver<T>::createUnordered()
{
//int blockSize = 256;
//int numBlocks = (numElems + blockSize - 1) / blockSize;
//gpuRandom << <numBlocks, blockSize >> > ( x );
//// Wait for GPU to finish before accessing on host
//cudaDeviceSynchronize();
curandGenerator_t gen;
/* Create pseudo-random number generator */
CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, seed));
/* Generate n floats on device */
CURAND_CALL(curandGenerate(gen, (unsigned int*)x, numElems));
// Wait for GPU to finish before accessing on host
CUDA_CALL(cudaDeviceSynchronize());
/* Cleanup */
CURAND_CALL(curandDestroyGenerator(gen));
}
// ONLY WORKS WITH FLOAT!!!
template<typename T>
void GPUSolver<T>::order()
{
bitonic_sort(x,numElems);
}
template<typename T>
ostream& GPUSolver<T>::print(ostream& os)
{
for (long i = 0; i < numElems; i++)
{
os << x[i] << ", ";
}
os << std::endl;
return os;
}
// Services
// Create a new set of numbers of type T. It returns the id of such set
// ONLY TESTED with int!!!
template<typename T>
long createSet(bool isGpu, long nElems, long seed = 129229);
// returns the ids of all sets available
// returns the metadata of the set of the given id
// returns a subset of the set. The parameters indicate which part
// order the given set
void orderSet(long id);
// deletes everything
void deleteAll();
struct MetadataSolver {
long id;
long numElems;
Solver* solver;
double nanosCreate;
double nanosOrder;
MetadataSolver() { id = -1; numElems = 0; solver = NULL; nanosCreate = -1; }
inline void deleteSolverData()
{
if (solver != NULL) { solver->deleteData(); solver = NULL; }
}
MetadataSolver(long i, long n, Solver* s, double d) { id = i; numElems = n; solver = s; nanosCreate = d; }
MetadataSolver(const MetadataSolver& m) { id = m.id; numElems = m.numElems; solver = m.solver; nanosCreate = m.nanosCreate; }
} ;
static long numSetIDs = 0;
typedef map<long, MetadataSolver> TupleMap;
static TupleMap sets;
int main1(int argc, char* argv[])
{
Solver* s = NULL;
long id;
// just for the GPU profiler to have something to report
float* tt;
cudaMallocManaged(&tt, 5 * sizeof(float));
long numElems = 256 * 256;
if (argc <= 1)
{
id = createSet<int>(false, numElems, 129229);
s = sets[id].solver;
orderSet(id);
//s = new CPUSolver<int>(100, 129229);
}
else
{
id = createSet<int>(true, numElems, 129229);
s = sets[id].solver;
orderSet(id);
//s = new GPUSolver<int>(100, 129229);
}
cout << (*s);
return 0;
}
int mainInt(int argc, char* argv[])
{
long numElems = 256;
int exponente;
const int EXIT = 10;
int opcion = EXIT;
long id;
// just for the GPU profiler to have something to report
float* tt;
cudaMallocManaged(&tt, 5 * sizeof(float));
do{
cout << "Escoja una opción: " << endl;
cout << " 1. Crear y ordenar conjunto en CPU " << endl;
cout << " 2. Crear y ordenar conjunto en GPU " << endl;
cout << " 3. Borrar todo" << endl;
cout << " " << EXIT << ". Salir " << endl;
cin >> opcion;
if (opcion == EXIT)
{
break;
}
else if (opcion == 3)
{
deleteAll();
cout << "Todo Borrado" << endl;
}
else if( opcion == 1 || opcion == 2)
{
cout << "exponente de 256?" << endl;
cin >> exponente;
numElems = 256 * exponente;
if (opcion == 1 )
{
id = createSet<int>(false, numElems, 129229);
orderSet(id);
}
else if (opcion == 2 )
{
id = createSet<int>(true, numElems, 129229);
orderSet(id);
}
}
} while (opcion != EXIT);
return 0;
}
int main(int argc, char* argv[])
{
return mainInt(argc, argv);
}
template<typename T>
long createSet(bool isGpu, long nElems, long seed)
{
Solver* s = NULL;
long id = ++numSetIDs;
auto start = std::chrono::high_resolution_clock::now();
if (isGpu)
{
s = new GPUSolver<T>(nElems, seed);
}
else
{
s = new CPUSolver<T>(nElems, seed);
}
// create the unordered set
s->createUnordered();
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::nano> elapsed = finish - start;
std::cout << "Elapsed Time numElems " << nElems << " creation: " << fixed << elapsed.count() << " nanoseconds, typeid: " << typeid(elapsed.count()).name() << std::endl;
sets[id] = MetadataSolver(id, nElems, s, elapsed.count());
return id;
}
void orderSet(long id)
{
Solver* s = sets[id].solver;
if(s!= NULL)
{
auto start = std::chrono::high_resolution_clock::now();
s->order();
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::nano> elapsed = finish - start;
std::cout << "Elapsed Time numElems " << sets[id].numElems << " order: " << fixed << elapsed.count() << " nanoseconds, typeid: " << typeid(elapsed.count()).name() << std::endl;
sets[id].nanosOrder = elapsed.count();
}
}
void deleteAll()
{
for (auto it = sets.begin(); it != sets.end(); ) {
(it->second).deleteSolverData();
it = sets.erase(it);
}
}
|
755b517aa58b3f3fedafa34af75158a7b4cbd2b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "ctranslate2/primitives.h"
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "cuda/helpers.h"
#include "type_dispatch.h"
namespace ctranslate2 {
template<>
template <typename T>
T primitives<Device::CUDA>::at(const T* x, dim_t index) {
T val = T();
cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1);
return val;
}
template<>
template <typename T>
void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) {
THRUST_CALL(thrust::fill, x, x + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) {
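// Fill a strided view of x: only indices 0, inc_x, 2*inc_x, ... are touched via the permutation iterator below.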
auto it = thrust::make_permutation_iterator(
x, thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0),
thrust::placeholders::_1 * inc_x));
THRUST_CALL(thrust::fill, it, it + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T),
hipMemcpyDeviceToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename U, typename V>
void primitives<Device::CUDA>::convert(const U* x, V* y, dim_t size) {
THRUST_CALL(thrust::copy,
cuda::device_cast(x), cuda::device_cast(x) + size, cuda::device_cast(y));
}
template void primitives<Device::CUDA>::convert(const float*, float16_t*, dim_t);
template void primitives<Device::CUDA>::convert(const float16_t*, float*, dim_t);
template<>
template <typename T>
T primitives<Device::CUDA>::sum(const T* array, dim_t size) {
return T(THRUST_CALL(thrust::reduce,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::device_type<T>(),
cuda::plus<cuda::device_type<T>>()));
}
template<>
template <typename T>
dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::maximum<cuda::device_type<T>>());
return static_cast<dim_t>(max - cuda::device_cast(array));
}
template<>
template <typename T>
T primitives<Device::CUDA>::max(const T* array, dim_t size) {
return at(array, max_element(array, size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::plus, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::plus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec_depth<dim_t>(b_size / a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::minimum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minimum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::maximum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::maximum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::multiplies, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::multiplies<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::multiplies<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::relu(const T* x, T* y, dim_t size) {
cuda::unary_transform(x, y, size, cuda::relu_func<cuda::device_type<T>>());
}
template void primitives<Device::CUDA>::relu(const float*, float*, dim_t);
template void primitives<Device::CUDA>::relu(const float16_t*, float16_t*, dim_t);
template<>
template <typename T>
void primitives<Device::CUDA>::gelu(const T* x, T* y, dim_t size) {
cuda::unary_transform(x, y, size, cuda::gelu_func<cuda::device_type<T>>());
}
template void primitives<Device::CUDA>::gelu(const float*, float*, dim_t);
template void primitives<Device::CUDA>::gelu(const float16_t*, float16_t*, dim_t);
template <typename T>
struct perm_indices_2d {
T _rows, _cols;
perm_indices_2d(T rows, T cols)
: _rows(rows)
, _cols(cols) {
}
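// Map a linear index of the transposed output to the corresponding index in the input.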
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _rows;
const T i1 = i % _rows;
return i1 * _cols + i0;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) {
cuda::permute(a, b, dims[0] * dims[1], perm_indices_2d<dim_t>(dims[0], dims[1]));
}
template <typename T>
struct perm_indices_3d {
T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2; // Strides of the permuted array.
perm_indices_3d(const T* dims, const T* perm) {
const T a_stride[3] = {dims[1] * dims[2], dims[2], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_s0 = _b_d1 * _b_d2;
_b_s1 = _b_d2;
_b_s2 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i % _b_d2;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_3d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
cuda::permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<dim_t>(dims, perm));
}
template <typename T>
struct perm_indices_4d {
T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2, _b_d3; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permuted array.
perm_indices_4d(const T* dims, const T* perm) {
const T a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_a_ps3 = a_stride[perm[3]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_d3 = dims[perm[3]];
_b_s0 = _b_d1 * _b_d2 * _b_d3;
_b_s1 = _b_d2 * _b_d3;
_b_s2 = _b_d3;
_b_s3 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i / _b_s2 % _b_d2;
const T i3 = i % _b_d3;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3;
}
};
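// Specialized kernel for the (0, 2, 1, 3) permutation: each output row j maps to the input row with the two middle dimensions swapped, and threads copy the innermost dimension contiguously.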
template <typename T>
__global__ void transpose_0213(const T* in,
const dim_t rows,
const dim_t cols,
const dim_t stride1,
const dim_t stride2,
T* out) {
const dim_t stride = stride1 * stride2;
for (dim_t j = blockIdx.x; j < rows; j += gridDim.x) {
const dim_t z = j / stride;
const dim_t y = (j % stride) / stride1;
const dim_t x = (j % stride) % stride1;
const dim_t j2 = z * stride + x * stride2 + y;
const T* row_in = in + j2 * cols;
T* row_out = out + j * cols;
for (dim_t i = threadIdx.x; i < cols; i += blockDim.x) {
row_out[i] = row_in[i];
}
}
}
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_4d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1 && perm[3] == 3) {
// Optimize the permutation used in multi-head attention.
const dim_t rows = dims[0] * dims[1] * dims[2];
const dim_t cols = dims[3];
const dim_t blocks = ::min(rows, cuda::max_blocks);
const dim_t threads = ::min(cols, cuda::max_threads);
hipLaunchKernelGGL(( transpose_0213), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(), a,
rows,
cols,
dims[1],
dims[2],
b);
return;
}
cuda::permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<dim_t>(dims, perm));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float* a, const float* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c,
const float*) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemm(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb,
a, lda,
&beta,
c, ldc));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float16_t* a, const float16_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c,
const float16_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, HIP_R_16F, ldb,
a, HIP_R_16F, lda,
&beta_h,
c, HIP_R_16F, ldc,
HIP_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const int8_t* a, const int8_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
int32_t* c,
const int32_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
int32_t alpha_i = alpha;
int32_t beta_i = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_i,
b, HIP_R_8I, ldb,
a, HIP_R_8I, lda,
&beta_i,
c, HIP_R_32I, ldc,
HIP_R_32I,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemmStridedBatched(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb, strideb,
a, lda, stridea,
&beta,
c, ldc, stridec,
batch_size));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float16_t* a, const float16_t* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const hipblasOperation_t transa = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transb = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(hipblasGemmStridedBatchedEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, HIP_R_16F, ldb, strideb,
a, HIP_R_16F, lda, stridea,
&beta_h,
c, HIP_R_16F, ldc, stridec,
batch_size,
HIP_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
struct exp_func {
__host__ __device__
float operator()(float x) { return expf(x); }
};
template<>
void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, exp_func());
}
struct log_func {
__host__ __device__
float operator()(float x) { return logf(x); }
};
template<>
template<>
void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, log_func());
}
#if CUDA_CAN_USE_HALF
struct hlog_func {
__device__
__half operator()(__half x) { return hlog(x); }
};
#else
struct hlog_func {
__host__ __device__
__half operator()(__half x) { return __half(logf(float(x))); }
};
#endif
template<>
template<>
void primitives<Device::CUDA>::log(const float16_t* x, float16_t* y, dim_t size) {
cuda::unary_transform(x, y, size, hlog_func());
}
template<>
template <typename T>
void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyHostToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyDeviceToHost, cuda::get_cuda_stream()));
}
#define DECLARE_IMPL(T) \
template T \
primitives<Device::CUDA>::at(const T* x, dim_t index); \
template void \
primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \
template void \
primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \
template void \
primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \
template T \
primitives<Device::CUDA>::sum(const T* array, dim_t size); \
template dim_t \
primitives<Device::CUDA>::max_element(const T* array, dim_t size); \
template T \
primitives<Device::CUDA>::max(const T* array, dim_t size); \
template void \
primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::transpose_2d(const T* a, \
const dim_t* dims, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_3d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_4d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \
template void \
cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t);
DECLARE_ALL_TYPES(DECLARE_IMPL)
}
| 755b517aa58b3f3fedafa34af75158a7b4cbd2b8.cu | #include "ctranslate2/primitives.h"
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "cuda/helpers.h"
#include "type_dispatch.h"
namespace ctranslate2 {
template<>
template <typename T>
T primitives<Device::CUDA>::at(const T* x, dim_t index) {
T val = T();
cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1);
return val;
}
template<>
template <typename T>
void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) {
THRUST_CALL(thrust::fill, x, x + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) {
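// Fill a strided view of x: only indices 0, inc_x, 2*inc_x, ... are touched via the permutation iterator below.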
auto it = thrust::make_permutation_iterator(
x, thrust::make_transform_iterator(thrust::counting_iterator<dim_t>(0),
thrust::placeholders::_1 * inc_x));
THRUST_CALL(thrust::fill, it, it + size, a);
}
template<>
template <typename T>
void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T),
cudaMemcpyDeviceToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename U, typename V>
void primitives<Device::CUDA>::convert(const U* x, V* y, dim_t size) {
THRUST_CALL(thrust::copy,
cuda::device_cast(x), cuda::device_cast(x) + size, cuda::device_cast(y));
}
template void primitives<Device::CUDA>::convert(const float*, float16_t*, dim_t);
template void primitives<Device::CUDA>::convert(const float16_t*, float*, dim_t);
template<>
template <typename T>
T primitives<Device::CUDA>::sum(const T* array, dim_t size) {
return T(THRUST_CALL(thrust::reduce,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::device_type<T>(),
cuda::plus<cuda::device_type<T>>()));
}
template<>
template <typename T>
dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) {
const auto* max = THRUST_CALL(thrust::max_element,
cuda::device_cast(array),
cuda::device_cast(array) + size,
cuda::maximum<cuda::device_type<T>>());
return static_cast<dim_t>(max - cuda::device_cast(array));
}
template<>
template <typename T>
T primitives<Device::CUDA>::max(const T* array, dim_t size) {
return at(array, max_element(array, size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::plus, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::plus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::plus<cuda::device_type<T>>(),
cuda::repeat_vec_depth<dim_t>(b_size / a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minus<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::minimum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::minimum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::maximum, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::maximum<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) {
using DeviceT = cuda::device_type<T>;
cuda::unary_transform(x, y, size, cuda::bind_right<cuda::multiplies, DeviceT>(DeviceT(a)));
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) {
cuda::binary_transform(a, b, c, size, cuda::multiplies<cuda::device_type<T>>());
}
template<>
template <typename T>
void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c,
dim_t a_size, dim_t b_size) {
cuda::binary_transform(a, b, c, b_size,
cuda::multiplies<cuda::device_type<T>>(),
cuda::repeat_vec<dim_t>(a_size));
}
template<>
template <typename T>
void primitives<Device::CUDA>::relu(const T* x, T* y, dim_t size) {
cuda::unary_transform(x, y, size, cuda::relu_func<cuda::device_type<T>>());
}
template void primitives<Device::CUDA>::relu(const float*, float*, dim_t);
template void primitives<Device::CUDA>::relu(const float16_t*, float16_t*, dim_t);
template<>
template <typename T>
void primitives<Device::CUDA>::gelu(const T* x, T* y, dim_t size) {
cuda::unary_transform(x, y, size, cuda::gelu_func<cuda::device_type<T>>());
}
template void primitives<Device::CUDA>::gelu(const float*, float*, dim_t);
template void primitives<Device::CUDA>::gelu(const float16_t*, float16_t*, dim_t);
template <typename T>
struct perm_indices_2d {
T _rows, _cols;
perm_indices_2d(T rows, T cols)
: _rows(rows)
, _cols(cols) {
}
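// Map a linear index of the transposed output to the corresponding index in the input.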
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _rows;
const T i1 = i % _rows;
return i1 * _cols + i0;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) {
cuda::permute(a, b, dims[0] * dims[1], perm_indices_2d<dim_t>(dims[0], dims[1]));
}
template <typename T>
struct perm_indices_3d {
T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2; // Strides of the permuted array.
perm_indices_3d(const T* dims, const T* perm) {
const T a_stride[3] = {dims[1] * dims[2], dims[2], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_s0 = _b_d1 * _b_d2;
_b_s1 = _b_d2;
_b_s2 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i % _b_d2;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2;
}
};
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_3d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
cuda::permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<dim_t>(dims, perm));
}
template <typename T>
struct perm_indices_4d {
T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array.
T _b_d0, _b_d1, _b_d2, _b_d3; // Dimensions of the permuted array.
T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permuted array.
perm_indices_4d(const T* dims, const T* perm) {
const T a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1};
_a_ps0 = a_stride[perm[0]];
_a_ps1 = a_stride[perm[1]];
_a_ps2 = a_stride[perm[2]];
_a_ps3 = a_stride[perm[3]];
_b_d0 = dims[perm[0]];
_b_d1 = dims[perm[1]];
_b_d2 = dims[perm[2]];
_b_d3 = dims[perm[3]];
_b_s0 = _b_d1 * _b_d2 * _b_d3;
_b_s1 = _b_d2 * _b_d3;
_b_s2 = _b_d3;
_b_s3 = 1;
}
__host__ __device__
T operator()(const T i) const {
const T i0 = i / _b_s0;
const T i1 = i / _b_s1 % _b_d1;
const T i2 = i / _b_s2 % _b_d2;
const T i3 = i % _b_d3;
return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3;
}
};
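// Specialized kernel for the (0, 2, 1, 3) permutation: each output row j maps to the input row with the two middle dimensions swapped, and threads copy the innermost dimension contiguously.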
template <typename T>
__global__ void transpose_0213(const T* in,
const dim_t rows,
const dim_t cols,
const dim_t stride1,
const dim_t stride2,
T* out) {
const dim_t stride = stride1 * stride2;
for (dim_t j = blockIdx.x; j < rows; j += gridDim.x) {
const dim_t z = j / stride;
const dim_t y = (j % stride) / stride1;
const dim_t x = (j % stride) % stride1;
const dim_t j2 = z * stride + x * stride2 + y;
const T* row_in = in + j2 * cols;
T* row_out = out + j * cols;
for (dim_t i = threadIdx.x; i < cols; i += blockDim.x) {
row_out[i] = row_in[i];
}
}
}
template<>
template <typename T>
void primitives<Device::CUDA>::transpose_4d(const T* a,
const dim_t* dims,
const dim_t* perm,
T* b) {
if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1 && perm[3] == 3) {
// Optimize the permutation used in multi-head attention.
const dim_t rows = dims[0] * dims[1] * dims[2];
const dim_t cols = dims[3];
const dim_t blocks = std::min(rows, cuda::max_blocks);
const dim_t threads = std::min(cols, cuda::max_threads);
transpose_0213<<<blocks, threads, 0, cuda::get_cuda_stream()>>>(a,
rows,
cols,
dims[1],
dims[2],
b);
return;
}
cuda::permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<dim_t>(dims, perm));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float* a, const float* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c,
const float*) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemm(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb,
a, lda,
&beta,
c, ldc));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const float16_t* a, const float16_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c,
const float16_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, CUDA_R_16F, ldb,
a, CUDA_R_16F, lda,
&beta_h,
c, CUDA_R_16F, ldc,
CUDA_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm(const int8_t* a, const int8_t* b,
bool, bool,
bool transpose_a, bool transpose_b,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
int32_t* c,
const int32_t*) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
int32_t alpha_i = alpha;
int32_t beta_i = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_i,
b, CUDA_R_8I, ldb,
a, CUDA_R_8I, lda,
&beta_i,
c, CUDA_R_32I, ldc,
CUDA_R_32I,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float* a, const float* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float* c) {
// Memo: cuBLAS assumes column-major storage.
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemmStridedBatched(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha,
b, ldb, strideb,
a, lda, stridea,
&beta,
c, ldc, stridec,
batch_size));
}
template<>
template<>
void primitives<Device::CUDA>::gemm_batch(const float16_t* a, const float16_t* b,
bool transpose_a, bool transpose_b,
dim_t batch_size,
dim_t m, dim_t n, dim_t k,
float alpha, float beta,
float16_t* c) {
const int lda = transpose_a ? m : k;
const int ldb = transpose_b ? k : n;
const int ldc = n;
const long long int stridea = m * k;
const long long int strideb = k * n;
const long long int stridec = m * n;
const cublasOperation_t transa = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transb = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
const __half alpha_h = alpha;
const __half beta_h = beta;
// cuBLAS assumes column-major storage, so swap a and b accordingly.
CUBLAS_CHECK(cublasGemmStridedBatchedEx(cuda::get_cublas_handle(),
transb, transa,
n, m, k,
&alpha_h,
b, CUDA_R_16F, ldb, strideb,
a, CUDA_R_16F, lda, stridea,
&beta_h,
c, CUDA_R_16F, ldc, stridec,
batch_size,
CUDA_R_16F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
struct exp_func {
__host__ __device__
float operator()(float x) { return expf(x); }
};
template<>
void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, exp_func());
}
struct log_func {
__host__ __device__
float operator()(float x) { return logf(x); }
};
template<>
template<>
void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) {
cuda::unary_transform(x, y, size, log_func());
}
#if CUDA_CAN_USE_HALF
struct hlog_func {
__device__
__half operator()(__half x) { return hlog(x); }
};
#else
struct hlog_func {
__host__ __device__
__half operator()(__half x) { return __half(logf(float(x))); }
};
#endif
template<>
template<>
void primitives<Device::CUDA>::log(const float16_t* x, float16_t* y, dim_t size) {
cuda::unary_transform(x, y, size, hlog_func());
}
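  // Added sketch (not part of the original source): a new elementwise primitive
  // would follow the same functor + cuda::unary_transform pattern. The
  // specialization is left commented out because it assumes a corresponding
  // declaration exists in the primitives header (hypothetical here).
  struct sin_func {
    __host__ __device__
    float operator()(float x) { return sinf(x); }
  };
  // template<>
  // void primitives<Device::CUDA>::sin(const float* x, float* y, dim_t size) {
  //   cuda::unary_transform(x, y, size, sin_func());
  // }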
template<>
template <typename T>
void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyHostToDevice, cuda::get_cuda_stream()));
}
template<>
template <typename T>
void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) {
CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyDeviceToHost, cuda::get_cuda_stream()));
}
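  // Added note: both cross-device copies above are asynchronous on
  // cuda::get_cuda_stream(); callers must synchronize that stream before
  // relying on the transferred data.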
#define DECLARE_IMPL(T) \
template T \
primitives<Device::CUDA>::at(const T* x, dim_t index); \
template void \
primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \
template void \
primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \
template void \
primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \
template T \
primitives<Device::CUDA>::sum(const T* array, dim_t size); \
template dim_t \
primitives<Device::CUDA>::max_element(const T* array, dim_t size); \
template T \
primitives<Device::CUDA>::max(const T* array, dim_t size); \
template void \
primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \
template void \
primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \
template void \
primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \
T* c, dim_t a_size, dim_t b_size); \
template void \
primitives<Device::CUDA>::transpose_2d(const T* a, \
const dim_t* dims, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_3d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
primitives<Device::CUDA>::transpose_4d(const T* a, \
const dim_t* dims, \
const dim_t* perm, \
T* b); \
template void \
cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \
template void \
cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t);
DECLARE_ALL_TYPES(DECLARE_IMPL)
}
|
2b6aecbd326f0efe7f8f7502d1de5bb69ec6a159.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "col2im.h"
#include "hip/hip_runtime.h"
}
__global__ void col2im_gpu_kernel(const int n, const float* data_col,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_im) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
float val = 0;
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = index / (width * height);
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
int offset =
(c * ksize * ksize + h * ksize + w) * height_col * width_col;
int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
int coeff_w_col = (1 - stride * height_col * width_col);
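        // Added note: with the standard im2col layout
        //   data_col[((c*ksize*ksize + kh*ksize + kw) * height_col + h_col) * width_col + w_col],
        // where kh = h - h_col*stride and kw = w - w_col*stride, the index expands to
        //   offset + h_col*coeff_h_col + w_col*coeff_w_col,
        // which is what the loops below accumulate.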
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] += val;
}
}
void col2im_gpu(float *data_col,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_im){
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( col2im_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK), 0, 0,
num_kernels, data_col, height, width, ksize, pad,
stride, height_col,
width_col, data_im);
} | 2b6aecbd326f0efe7f8f7502d1de5bb69ec6a159.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "col2im.h"
#include "cuda.h"
}
__global__ void col2im_gpu_kernel(const int n, const float* data_col,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_im) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
float val = 0;
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = index / (width * height);
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
int offset =
(c * ksize * ksize + h * ksize + w) * height_col * width_col;
int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
int coeff_w_col = (1 - stride * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] += val;
}
}
void col2im_gpu(float *data_col,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_im){
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
col2im_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK>>>(
num_kernels, data_col, height, width, ksize, pad,
stride, height_col,
width_col, data_im);
} |
02a0c789ca9a44d8192a886ed1ec97fc042b8686.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <string.h>
//#include <windows.h>
// #include "__pikoDefines.h"
#include "reyesPipe.h"
#include "__pikoCompiledPipe.cuh"
#ifdef __PIKOC_HOST__
//#include <GL/glut.h>
#include <piko/builtinTypes.h>
#include "host_math.h"
#include "pikoTypes.h"
#include "FPSMeter.h"
// pikoc does not work well with assimp, so it will not be included when pikoc runs
#ifndef __PIKOC__
#include "sceneParser.h"
#endif // __PIKOC__
using namespace std;
#define PATCH_BUFFER_SIZE 6000
// ----------------------------------------
// function prototypes
// ----------------------------------------
void init(int argc, char* argv[]);
void initScreen(int W, int H);
void initScene();
void initPipe();
void display();
void destroyApp();
void doPerfTest(int n_runs = 10);
void runPipe();
void pipelineTest();
void resetDepthBuffer();
void printDepthBuffer();
// camera helper functions here
void buildProjectionMatrix();
void buildLookAt();
void glhPerspectivef2(float *matrix, float fovyInDegrees, float aspectRatio,
float znear, float zfar);
void glhFrustumf2(float *matrix, float left, float right, float bottom, float top,
float znear, float zfar);
void loadPatchBuffer(int start, int end);
// ----------------------------------------
// global variables
// ----------------------------------------
// camera angles
float theta, phi, camdist;
#ifndef __PIKOC__
// main scene
scene sMain;
#endif // __PIKOC__
piko_patch* patchBuffer = NULL;
ReyesPipe piko_pipe;
// state
ConstantState pipelineConstantState;
MutableState pipelineMutableState;
int numPatches;
int main(int argc, char* argv[])
{
hipSetDevice(0);
//glutInit(&argc, argv);
initScreen(640, 480);
//initScreen(1280, 720);
//initScreen(1024, 768);
initScene();
initPipe();
display();
//glutDisplayFunc(display);
//doPerfTest(5);
//atexit(destroyApp);
//glutMainLoop();
}
cvec4f matmultfloat4(float * mvpMat, cvec4f v)
{
cvec4f outRes;
(outRes).x = mvpMat[0] * v.x + mvpMat[4] * v.y + mvpMat[8 ] * v.z + mvpMat[12] * v.w;
(outRes).y = mvpMat[1] * v.x + mvpMat[5] * v.y + mvpMat[9 ] * v.z + mvpMat[13] * v.w;
  (outRes).z = mvpMat[2] * v.x + mvpMat[6] * v.y + mvpMat[10] * v.z + mvpMat[14] * v.w;
(outRes).w = mvpMat[3] * v.x + mvpMat[7] * v.y + mvpMat[11] * v.z + mvpMat[15] * v.w;
return outRes;
}
void initScreen(int W, int H){
#ifndef __PIKOC__
sMain.cam().W() = W;
sMain.cam().H() = H;
#endif // __PIKOC__
pipelineConstantState.screenSizeX = W;
pipelineConstantState.screenSizeY = H;
//glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
//glutInitWindowSize(W,H);
//glutCreateWindow("Reyes Pipeline");
//glClearColor(0.0f, 0.0f, 0.2f, 1.0f);
}
void display()
{
// update state
buildProjectionMatrix();
resetDepthBuffer();
printf("running display\n");
piko_pipe.prepare();
printf("After prepare\n");
piko_pipe.run_single();
printf("After single\n");
unsigned* data = piko_pipe.pikoScreen.getData();
int x = pipelineConstantState.screenSizeX, y = pipelineConstantState.screenSizeY;
FILE *fp = fopen("result", "w");
for(int i=0; i<y; i++)
{
for(int j=0; j<x*4; j++)
{
fprintf(fp, "%d", ((char*)data)[i*x*4 + j]);
if(j != x*4 - 1)
{
fprintf(fp, "\t");
}
}
if(i != y - 1)
{
fprintf(fp, "\n");
}
}
fclose(fp);
return;
//glDrawPixels(pipelineConstantState.screenSizeX, pipelineConstantState.screenSizeY, GL_RGBA, GL_UNSIGNED_BYTE, data);
//glutSwapBuffers();
// for(int i=0; i< pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY; i++)
// {
// if(data[i] != 0)
// printf("%d: %x\n", i, data[i]);
// }
}
void doPerfTest(int n_runs)
{
printf("Running perf test...\n");
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
Stopwatch mywatch;
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
printf("end\n");
}
float prepTime = mywatch.GetTime();
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
fflush(NULL);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
printf("end\n");
}
float fullrunTime = mywatch.GetTime();
float total_time_to_ms = 1000.0f / (float) n_runs;
printf("Prep time = %0.2f ms\n", total_time_to_ms * (prepTime));
printf("Full run time = %0.2f ms\n", total_time_to_ms * (fullrunTime));
printf("Raster time = %0.2f ms\n", total_time_to_ms * (fullrunTime - prepTime));
}
void initScene(){
// the scene will only be compiled when going through gcc and not pikoc
// parse scene file
sceneParser scp;
int nT, nV, nP;
buildProjectionMatrix();
scp.parseFile("./", "bezteapot.scene", &sMain);
printf("Flattening scene assets: "); fflush(stdout);
sMain.flatten(nT,nV, nP);
printf("T: %d, V: %d P:%d\n", nT, nV, nP);
numPatches = nP;
// create the final matrix
// FIXME: perhaps this is flipped?
//HOST::matmult4x4(pipelineConstantState.projMatrix, pipelineConstantState.viewMatrix,pipelineConstantState.viewProjMatrix );
//HOST::matmult4x4(pipelineConstantState.viewMatrix, pipelineConstantState.projMatrix, pipelineConstantState.viewProjMatrix);
sMain.cam().display();
}
void initPipe()
{
// build the state from the scene
// camera& cam = sMain.cam();
// pipelineConstantState.camera_eye = cam.eye();
// pipelineConstantState.camera_target = cam.target();
// pipelineConstantState.camera_up = cam.up();
// if(sMain.lights().size() > 0) {
// pipelineConstantState.lightPos = sMain.lights()[0].pos();
// pipelineConstantState.lightColor = sMain.lights()[0].dif();
// }
// else {
// // some default light that might suck
// pipelineConstantState.lightPos = gencvec3f(1.0,1.0,1.0);
// pipelineConstantState.lightColor = gencvec3f(1.0,1.0,1.0);
// }
int numLoadPatches = numPatches;
loadPatchBuffer(0,numLoadPatches);
resetDepthBuffer();
piko_pipe.allocate(pipelineConstantState, pipelineMutableState, patchBuffer, numLoadPatches);
}
// void runPipe()
// {
// int count = 1;
// ReyesPipe p;
// p.run(pipelineConstantState,patchBuffer, 1);
// }
void getPerspectiveMat(float *mat, float fovy,
float aspect, float n, float f)
{
float l, r, b, t;
float pi = 4.0 * atan(1);
float jiao = fovy * pi / 360;
t = n * tanf(jiao);
b = -t;
l = b * aspect;
r = t * aspect;
memset(mat, 0, sizeof(float)*16);
mat[0] = (2.0f * n) / (r-l);
mat[2] = (r+l)/(r-l);
mat[5] = (2.0f * n) / (t-b);
mat[6] = (t+b)/(t-b);
mat[10] = -(f+n)/(f-n);
mat[11] = -(2.0f*f*n)/(f-n);
mat[14] = -1.0;
}
void getLookUpMat(float *res, float ex, float ey, float ez,
float tx, float ty, float tz, float ux, float uy, float uz)
{
float fx = tx - ex;
float fy = ty - ey;
float fz = tz - ez;
float flength = sqrt(fx*fx + fy*fy + fz*fz);
fx /= flength;
fy /= flength;
fz /= flength;
float sx, sy, sz;
sx = fy*uz - fz*uy;
sy = fz*ux - fx*uz;
sz = fx*uy - fy*ux;
float slength = sqrt(sx*sx + sy*sy + sz*sz);
sx /= slength;
sy /= slength;
sz /= slength;
ux = sy*fz - sz*fy;
uy = sz*fx - sx*fz;
uz = sx*fy - sy*fx;
float mat[16];
memset(mat, 0, sizeof(float)*16);
mat[0] = 1;
mat[5] = 1;
mat[10] = 1;
mat[15] = 1;
mat[0] = sx;
mat[1] = sy;
mat[2] = sz;
mat[4] = ux;
mat[5] = uy;
mat[6] = uz;
mat[8] = -fx;
mat[9] = -fy;
mat[10] = -fz;
float trans[16];
memset(trans, 0, sizeof(float)*16);
trans[0] = 1;
trans[5] = 1;
trans[10] = 1;
trans[15] = 1;
trans[3] = -ex;
trans[7] = -ey;
trans[11] = -ez;
memset(res, 0, sizeof(float)*16);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
res[i*4 + j] += mat[i*4 + k] * trans[k*4 + j];
}
}
}
}
void buildProjectionMatrix()
{
camera& cam = sMain.cam();
float viewmat[16], mat[16], resProj[16], resView[16];
//glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
getPerspectiveMat(mat, cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//gluPerspective(cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//glGetFloatv(GL_PROJECTION_MATRIX, viewmat);
memset(resProj, 0, sizeof(resProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major, mat is row-major
resProj[j*4+i] += viewmat[k*4+i]*mat[k*4+j];
}
}
}
//glLoadMatrixf(resProj);
//glMatrixMode(GL_MODELVIEW);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
float lu[16];
getLookUpMat(lu, cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
memset(resView, 0, sizeof(resView));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major, lu is row-major
resView[j*4+i] += viewmat[k*4+i]*lu[k*4+j];
}
}
}
//glLoadMatrixf(resView);
/*
gluLookAt(
cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
*/
/*
glGetFloatv(GL_MODELVIEW_MATRIX, viewmat);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", viewmat[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", lu[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
*/
//glMatrixMode(GL_PROJECTION);
//glPushMatrix();
// glGetFloatv(GL_MODELVIEW_MATRIX, pipelineConstantState.viewMatrix);
memcpy(pipelineConstantState.viewMatrix, resView, sizeof(float)*16);
//glMultMatrixf(pipelineConstantState.viewMatrix);
float newProj[16];
memset(newProj, 0, sizeof(newProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
newProj[j*4 + i] += resProj[k*4+i] * resView[j*4 + k];
}
}
}
memcpy(pipelineConstantState.viewProjMatrix, newProj, sizeof(float)*16);
//glGetFloatv(GL_PROJECTION_MATRIX, pipelineConstantState.viewProjMatrix);
//glPopMatrix();
// printf("final projection matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewProjMatrix[i]);
// } printf("\n");
// printf("final modelview matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewMatrix[i]);
// } printf("\n");
}
void loadPatchBuffer(int start, int end) {
// lazy create
if(patchBuffer == NULL) {
patchBuffer = new piko_patch[PATCH_BUFFER_SIZE];
}
int size = end - start;
if (size <=0) return;
int counter = 0;
printf("\nfetching patches from %d to %d\n", start, end);
for(int i=start; i<end; i++) {
for(int j=0; j<16; j++)
{
patchBuffer[counter].CP[j] = sMain._flatPatches[i*16+j];
//printf("flat patch: ");
//disp4(patchBuffer[counter].CP[j]);
//disp4(sMain._flatPatches[i*16+j]);
//printf("\n");
}
patchBuffer[counter].numSplits = 0; // all patches begin with zero splits
//patchBuffer[counter].id = counter;
//patchBuffer[counter].bbmin.x = 99999.0f;
//patchBuffer[counter].bbmin.y = 99999.0f;
//patchBuffer[counter].bbmax.x = -99999.0f;
//patchBuffer[counter].bbmax.y = -99999.0f;
counter++;
}
}
void pipelineTest()
{
// test out parts of the pipeline here
cvec4f point = patchBuffer[0].CP[0];
printf("\n\n point:\n");
disp4(point);
cvec4f clipPoint = matmultfloat4(pipelineConstantState.viewProjMatrix, point);
if(clipPoint.w == 0.0f) clipPoint.w = 1.0f;
clipPoint.x /= clipPoint.w;
clipPoint.y /= clipPoint.w;
clipPoint.z /= clipPoint.w;
clipPoint.x = (clipPoint.x+1.0) * 0.5 * pipelineConstantState.screenSizeX;
clipPoint.y = (clipPoint.y+1.0) * 0.5 * pipelineConstantState.screenSizeY;
printf("\n");
disp4(clipPoint);
printf("\n");
}
void resetDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
pipelineMutableState.zBuffer[i] = 1.0f;
}
}
void printDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
printf("%f\n", pipelineMutableState.zBuffer[i]);
}
}
void destroyApp()
{
piko_pipe.destroy();
}
#endif // __PIKOC_HOST__
| 02a0c789ca9a44d8192a886ed1ec97fc042b8686.cu |
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <string.h>
//#include <windows.h>
// #include "__pikoDefines.h"
#include "reyesPipe.h"
#include "__pikoCompiledPipe.cuh"
#ifdef __PIKOC_HOST__
//#include <GL/glut.h>
#include <piko/builtinTypes.h>
#include "host_math.h"
#include "pikoTypes.h"
#include "FPSMeter.h"
// pikoc does not work well with assimp, so it will not be included when pikoc runs
#ifndef __PIKOC__
#include "sceneParser.h"
#endif // __PIKOC__
using namespace std;
#define PATCH_BUFFER_SIZE 6000
// ----------------------------------------
// function prototypes
// ----------------------------------------
void init(int argc, char* argv[]);
void initScreen(int W, int H);
void initScene();
void initPipe();
void display();
void destroyApp();
void doPerfTest(int n_runs = 10);
void runPipe();
void pipelineTest();
void resetDepthBuffer();
void printDepthBuffer();
// camera helper functions here
void buildProjectionMatrix();
void buildLookAt();
void glhPerspectivef2(float *matrix, float fovyInDegrees, float aspectRatio,
float znear, float zfar);
void glhFrustumf2(float *matrix, float left, float right, float bottom, float top,
float znear, float zfar);
void loadPatchBuffer(int start, int end);
// ----------------------------------------
// global variables
// ----------------------------------------
// camera angles
float theta, phi, camdist;
#ifndef __PIKOC__
// main scene
scene sMain;
#endif // __PIKOC__
piko_patch* patchBuffer = NULL;
ReyesPipe piko_pipe;
// state
ConstantState pipelineConstantState;
MutableState pipelineMutableState;
int numPatches;
int main(int argc, char* argv[])
{
cudaSetDevice(0);
//glutInit(&argc, argv);
initScreen(640, 480);
//initScreen(1280, 720);
//initScreen(1024, 768);
initScene();
initPipe();
display();
//glutDisplayFunc(display);
//doPerfTest(5);
//atexit(destroyApp);
//glutMainLoop();
}
cvec4f matmultfloat4(float * mvpMat, cvec4f v)
{
cvec4f outRes;
(outRes).x = mvpMat[0] * v.x + mvpMat[4] * v.y + mvpMat[8 ] * v.z + mvpMat[12] * v.w;
(outRes).y = mvpMat[1] * v.x + mvpMat[5] * v.y + mvpMat[9 ] * v.z + mvpMat[13] * v.w;
  (outRes).z = mvpMat[2] * v.x + mvpMat[6] * v.y + mvpMat[10] * v.z + mvpMat[14] * v.w;
(outRes).w = mvpMat[3] * v.x + mvpMat[7] * v.y + mvpMat[11] * v.z + mvpMat[15] * v.w;
return outRes;
}
void initScreen(int W, int H){
#ifndef __PIKOC__
sMain.cam().W() = W;
sMain.cam().H() = H;
#endif // __PIKOC__
pipelineConstantState.screenSizeX = W;
pipelineConstantState.screenSizeY = H;
//glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
//glutInitWindowSize(W,H);
//glutCreateWindow("Reyes Pipeline");
//glClearColor(0.0f, 0.0f, 0.2f, 1.0f);
}
void display()
{
// update state
buildProjectionMatrix();
resetDepthBuffer();
printf("running display\n");
piko_pipe.prepare();
printf("After prepare\n");
piko_pipe.run_single();
printf("After single\n");
unsigned* data = piko_pipe.pikoScreen.getData();
int x = pipelineConstantState.screenSizeX, y = pipelineConstantState.screenSizeY;
FILE *fp = fopen("result", "w");
for(int i=0; i<y; i++)
{
for(int j=0; j<x*4; j++)
{
fprintf(fp, "%d", ((char*)data)[i*x*4 + j]);
if(j != x*4 - 1)
{
fprintf(fp, "\t");
}
}
if(i != y - 1)
{
fprintf(fp, "\n");
}
}
fclose(fp);
return;
//glDrawPixels(pipelineConstantState.screenSizeX, pipelineConstantState.screenSizeY, GL_RGBA, GL_UNSIGNED_BYTE, data);
//glutSwapBuffers();
// for(int i=0; i< pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY; i++)
// {
// if(data[i] != 0)
// printf("%d: %x\n", i, data[i]);
// }
}
void doPerfTest(int n_runs)
{
printf("Running perf test...\n");
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
Stopwatch mywatch;
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
printf("end\n");
}
float prepTime = mywatch.GetTime();
mywatch.Reset();
for(int run = 0; run < n_runs; run++)
{
printf("Start %d ---------- ", run);
fflush(NULL);
buildProjectionMatrix();
resetDepthBuffer();
piko_pipe.prepare();
piko_pipe.run_single();
printf("end\n");
}
float fullrunTime = mywatch.GetTime();
float total_time_to_ms = 1000.0f / (float) n_runs;
printf("Prep time = %0.2f ms\n", total_time_to_ms * (prepTime));
printf("Full run time = %0.2f ms\n", total_time_to_ms * (fullrunTime));
printf("Raster time = %0.2f ms\n", total_time_to_ms * (fullrunTime - prepTime));
}
void initScene(){
// the scene will only be compiled when going through gcc and not pikoc
// parse scene file
sceneParser scp;
int nT, nV, nP;
buildProjectionMatrix();
scp.parseFile("./", "bezteapot.scene", &sMain);
printf("Flattening scene assets: "); fflush(stdout);
sMain.flatten(nT,nV, nP);
printf("T: %d, V: %d P:%d\n", nT, nV, nP);
numPatches = nP;
// create the final matrix
// FIXME: perhaps this is flipped?
//HOST::matmult4x4(pipelineConstantState.projMatrix, pipelineConstantState.viewMatrix,pipelineConstantState.viewProjMatrix );
//HOST::matmult4x4(pipelineConstantState.viewMatrix, pipelineConstantState.projMatrix, pipelineConstantState.viewProjMatrix);
sMain.cam().display();
}
void initPipe()
{
// build the state from the scene
// camera& cam = sMain.cam();
// pipelineConstantState.camera_eye = cam.eye();
// pipelineConstantState.camera_target = cam.target();
// pipelineConstantState.camera_up = cam.up();
// if(sMain.lights().size() > 0) {
// pipelineConstantState.lightPos = sMain.lights()[0].pos();
// pipelineConstantState.lightColor = sMain.lights()[0].dif();
// }
// else {
// // some default light that might suck
// pipelineConstantState.lightPos = gencvec3f(1.0,1.0,1.0);
// pipelineConstantState.lightColor = gencvec3f(1.0,1.0,1.0);
// }
int numLoadPatches = numPatches;
loadPatchBuffer(0,numLoadPatches);
resetDepthBuffer();
piko_pipe.allocate(pipelineConstantState, pipelineMutableState, patchBuffer, numLoadPatches);
}
// void runPipe()
// {
// int count = 1;
// ReyesPipe p;
// p.run(pipelineConstantState,patchBuffer, 1);
// }
void getPerspectiveMat(float *mat, float fovy,
float aspect, float n, float f)
{
float l, r, b, t;
float pi = 4.0 * atan(1);
float jiao = fovy * pi / 360;
t = n * tanf(jiao);
b = -t;
l = b * aspect;
r = t * aspect;
memset(mat, 0, sizeof(float)*16);
mat[0] = (2.0f * n) / (r-l);
mat[2] = (r+l)/(r-l);
mat[5] = (2.0f * n) / (t-b);
mat[6] = (t+b)/(t-b);
mat[10] = -(f+n)/(f-n);
mat[11] = -(2.0f*f*n)/(f-n);
mat[14] = -1.0;
}
void getLookUpMat(float *res, float ex, float ey, float ez,
float tx, float ty, float tz, float ux, float uy, float uz)
{
float fx = tx - ex;
float fy = ty - ey;
float fz = tz - ez;
float flength = sqrt(fx*fx + fy*fy + fz*fz);
fx /= flength;
fy /= flength;
fz /= flength;
float sx, sy, sz;
sx = fy*uz - fz*uy;
sy = fz*ux - fx*uz;
sz = fx*uy - fy*ux;
float slength = sqrt(sx*sx + sy*sy + sz*sz);
sx /= slength;
sy /= slength;
sz /= slength;
ux = sy*fz - sz*fy;
uy = sz*fx - sx*fz;
uz = sx*fy - sy*fx;
float mat[16];
memset(mat, 0, sizeof(float)*16);
mat[0] = 1;
mat[5] = 1;
mat[10] = 1;
mat[15] = 1;
mat[0] = sx;
mat[1] = sy;
mat[2] = sz;
mat[4] = ux;
mat[5] = uy;
mat[6] = uz;
mat[8] = -fx;
mat[9] = -fy;
mat[10] = -fz;
float trans[16];
memset(trans, 0, sizeof(float)*16);
trans[0] = 1;
trans[5] = 1;
trans[10] = 1;
trans[15] = 1;
trans[3] = -ex;
trans[7] = -ey;
trans[11] = -ez;
memset(res, 0, sizeof(float)*16);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
res[i*4 + j] += mat[i*4 + k] * trans[k*4 + j];
}
}
}
}
void buildProjectionMatrix()
{
camera& cam = sMain.cam();
float viewmat[16], mat[16], resProj[16], resView[16];
//glMatrixMode(GL_PROJECTION);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
getPerspectiveMat(mat, cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//gluPerspective(cam.fovyDeg(), cam.aspect(), cam.zNear(), cam.zFar());
//glGetFloatv(GL_PROJECTION_MATRIX, viewmat);
memset(resProj, 0, sizeof(resProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major, mat is row-major
resProj[j*4+i] += viewmat[k*4+i]*mat[k*4+j];
}
}
}
//glLoadMatrixf(resProj);
//glMatrixMode(GL_MODELVIEW);
//glLoadIdentity();
memset(viewmat, 0, sizeof(float)*16);
viewmat[0] = 1;
viewmat[5] = 1;
viewmat[10] = 1;
viewmat[15] = 1;
float lu[16];
getLookUpMat(lu, cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
memset(resView, 0, sizeof(resView));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
                // res and viewmat are column-major, lu is row-major
resView[j*4+i] += viewmat[k*4+i]*lu[k*4+j];
}
}
}
//glLoadMatrixf(resView);
/*
gluLookAt(
cam.eye().x, cam.eye().y, cam.eye().z,
cam.target().x, cam.target().y, cam.target().z,
cam.up().x, cam.up().y, cam.up().z);
*/
/*
glGetFloatv(GL_MODELVIEW_MATRIX, viewmat);
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", viewmat[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
printf("%.2f\t", lu[i*4+j]);
}
printf("\n");
}
printf("\n");
printf("\n");
*/
//glMatrixMode(GL_PROJECTION);
//glPushMatrix();
// glGetFloatv(GL_MODELVIEW_MATRIX, pipelineConstantState.viewMatrix);
memcpy(pipelineConstantState.viewMatrix, resView, sizeof(float)*16);
//glMultMatrixf(pipelineConstantState.viewMatrix);
float newProj[16];
memset(newProj, 0, sizeof(newProj));
for(int i=0; i<4; i++)
{
for(int j=0; j<4; j++)
{
for(int k=0; k<4; k++)
{
newProj[j*4 + i] += resProj[k*4+i] * resView[j*4 + k];
}
}
}
memcpy(pipelineConstantState.viewProjMatrix, newProj, sizeof(float)*16);
//glGetFloatv(GL_PROJECTION_MATRIX, pipelineConstantState.viewProjMatrix);
//glPopMatrix();
// printf("final projection matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewProjMatrix[i]);
// } printf("\n");
// printf("final modelview matrix:\n");
// for(int i=0; i<16; i++) {
// if (i%4 ==0) printf("\n");
// printf("%f ", pipelineConstantState.viewMatrix[i]);
// } printf("\n");
}
void loadPatchBuffer(int start, int end) {
// lazy create
if(patchBuffer == NULL) {
patchBuffer = new piko_patch[PATCH_BUFFER_SIZE];
}
int size = end - start;
if (size <=0) return;
int counter = 0;
printf("\nfetching patches from %d to %d\n", start, end);
for(int i=start; i<end; i++) {
for(int j=0; j<16; j++)
{
patchBuffer[counter].CP[j] = sMain._flatPatches[i*16+j];
//printf("flat patch: ");
//disp4(patchBuffer[counter].CP[j]);
//disp4(sMain._flatPatches[i*16+j]);
//printf("\n");
}
patchBuffer[counter].numSplits = 0; // all patches begin with zero splits
//patchBuffer[counter].id = counter;
//patchBuffer[counter].bbmin.x = 99999.0f;
//patchBuffer[counter].bbmin.y = 99999.0f;
//patchBuffer[counter].bbmax.x = -99999.0f;
//patchBuffer[counter].bbmax.y = -99999.0f;
counter++;
}
}
void pipelineTest()
{
// test out parts of the pipeline here
cvec4f point = patchBuffer[0].CP[0];
printf("\n\n point:\n");
disp4(point);
cvec4f clipPoint = matmultfloat4(pipelineConstantState.viewProjMatrix, point);
if(clipPoint.w == 0.0f) clipPoint.w = 1.0f;
clipPoint.x /= clipPoint.w;
clipPoint.y /= clipPoint.w;
clipPoint.z /= clipPoint.w;
clipPoint.x = (clipPoint.x+1.0) * 0.5 * pipelineConstantState.screenSizeX;
clipPoint.y = (clipPoint.y+1.0) * 0.5 * pipelineConstantState.screenSizeY;
printf("\n");
disp4(clipPoint);
printf("\n");
}
void resetDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
pipelineMutableState.zBuffer[i] = 1.0f;
}
}
void printDepthBuffer() {
int nPixels = pipelineConstantState.screenSizeX * pipelineConstantState.screenSizeY;
for(int i = 0; i < nPixels; i++) {
printf("%f\n", pipelineMutableState.zBuffer[i]);
}
}
void destroyApp()
{
piko_pipe.destroy();
}
#endif // __PIKOC_HOST__
|
37ae57cef20edfd6afb7e11b827cd1421269e22a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Conv.h"
#include "../Common.cuh"
#include <iostream>
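// Added overview: RUN_CONV tiles a six-deep loop nest (example, fused in/out
// channel, output x/y, kernel x/y) into a sequence of kernel launches; grid and
// block dimensions are clamped by the corresponding `limits` fields and the
// current ConvOffset is copied to the device before every launch.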
#define RUN_CONV(function, offset, limits, weight_shape, output_shape, parameters) \
for (offset.example = 0; offset.example < output_shape[0]; offset.example += limits.example) { \
for (offset.channel = 0; offset.channel < weight_shape[0] * weight_shape[1]; offset.channel += limits.channel) { \
for (offset.x_out = 0; offset.x_out < output_shape[2]; offset.x_out += limits.x_out) { \
for (offset.y_out = 0; offset.y_out < output_shape[3]; offset.y_out += limits.y_out) { \
for (offset.x_in = 0; offset.x_in < weight_shape[2]; offset.x_in += limits.x_in) { \
for (offset.y_in = 0; offset.y_in < weight_shape[3]; offset.y_in += limits.y_in) { \
int grid_x = min(output_shape[2] - offset.x_out, limits.x_out); \
int grid_y = min(output_shape[3] - offset.y_out, limits.y_out); \
int grid_z = min(output_shape[0] - offset.example, limits.example); \
dim3 grid(grid_x, grid_y, grid_z); \
int block_x = min(weight_shape[2] - offset.x_in, limits.x_in); \
int block_y = min(weight_shape[3] - offset.y_in, limits.y_in); \
int block_z = min(weight_shape[0] * weight_shape[1] - offset.channel, limits.channel);\
dim3 block(block_x, block_y, block_z); \
HE(hipMemcpy(d_offset, &offset, sizeof(ConvOffset), hipMemcpyHostToDevice)); \
                        function <<<grid, block>>> parameters; \
                        HE(hipPeekAtLastError()); \
} \
} \
} \
} \
} \
    }
#define CONV_PRINT_DEBUG false
#define CONV_BACK_PRINT_DEBUG false
using namespace std;
extern hipError_t cudaStatus;
__device__ ConvInfo get_indices(CUDATensor* input, CUDATensor* output, CUDATensor* weight, ConvOffset* offset) {
ConvInfo result = { 0 };
int in_width = 1;
int in_height = 1;
result.out_width = 1;
result.out_height = 1;
result.n_examples = input->shape[0];
result.n_channels_in = weight->shape[0];
result.n_channels_out = weight->shape[1];
if (input->dims == 4) {
in_width = input->shape[input->dims - 2];
in_height = input->shape[input->dims - 1];
result.out_width = output->shape[output->dims - 2];
result.out_height = output->shape[output->dims - 1];
}
else {
in_width = input->shape[input->dims - 1];
result.out_width = output->shape[output->dims - 1];
}
int x_out = offset->x_out + blockIdx.x;
int y_out = offset->y_out + blockIdx.y;
int x_in = offset->x_in + threadIdx.x;
int y_in = offset->y_in + threadIdx.y;
int channel = offset->channel + threadIdx.z;
int ch_in = channel / result.n_channels_out;
int ch_out = channel % result.n_channels_out;
int example = offset->example + blockIdx.z;
result.in_idx = example * result.n_channels_in * in_width * in_height + // example
ch_in * in_height * in_width + // in channel
(y_out + y_in) * in_width + // height
x_out + x_in; // width
result.kern_idx = ch_in * weight->shape[2] * weight->shape[3] + // in channel
ch_out * weight->shape[0] * weight->shape[2] * weight->shape[3] + // out channel
y_in * weight->shape[3] + // height
x_in; // width
result.out_idx = example * result.n_channels_out * result.out_width * result.out_height + // example
ch_out * result.out_width * result.out_height + // out channel
y_out * result.out_width + // height
x_out; // width
result.bias_idx = ch_out;
return result;
}
__global__ void convolve(CUDATensor* input, CUDATensor* output, CUDATensor* weight, CUDATensor* bias, ConvOffset* offset) {
ConvInfo indices = get_indices(input, output, weight, offset);
#if CONV_PRINT_DEBUG
if (indices.out_idx == 0) {
//printf("Out idx: %i, in idx: %i, weight idx: %i, weight: %2.5f, dim_x: %i, dim_y: %i, dim_z: %i, channel: %i\n",
// indices.out_idx, indices.in_idx, indices.kern_idx, weight->data[indices.kern_idx], threadIdx.x, threadIdx.y, threadIdx.z, offset->ch_in * weight->shape[1] + offset->ch_out + threadIdx.z);
printf("In idx: %i, block_x: %i, block_y: %i, block_z: %i, thread_x: %i, thread_y: %i, thread_z: %i\n",
indices.in_idx, blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z);
//printf("In idx: %i, offset_ch_in: %i, offset_ch_out: %i\n",
// indices.in_idx, offset->ch_in, offset->ch_out);
}
#endif
atomicAdd(output->data + indices.out_idx, input->data[indices.in_idx] * weight->data[indices.kern_idx]);
//#if CONV_PRINT_DEBUG
// printf("Input: %2.5f, weight: %2.5f, output: %2.5f\n",
// input->data[indices.in_idx], weight->data[indices.kern_idx], output->data[indices.out_idx]);
//#endif
}
__global__ void add_bias(CUDATensor* input, CUDATensor* output, CUDATensor* weight, CUDATensor* bias, ConvOffset* offset) {
ConvInfo indices = get_indices(input, output, weight, offset);
atomicAdd(output->data + indices.out_idx, bias->data[indices.bias_idx]);
#if CONV_PRINT_DEBUG
printf("BIAS:\n");
printf("Out idx: %i, bias idx: %i, output: %2.3f\n", indices.out_idx, indices.bias_idx, output->data[indices.out_idx]);
#endif
}
__global__ void convolve_backward(CUDATensor* input, CUDATensor* d_input, CUDATensor* d_output,
CUDATensor* weight, CUDATensor* bias,
CUDATensor* d_weight, CUDATensor* d_bias,
ConvOffset* offset) {
ConvInfo indices = get_indices(input, d_output, weight, offset);
int n_vals_w = indices.out_width * indices.out_height * indices.n_examples * indices.n_channels_out;
int n_vals_b = indices.out_width * indices.out_height * indices.n_examples * indices.n_channels_out *
indices.n_channels_in * weight->shape[2] * weight->shape[3];
atomicAdd(d_weight->data + indices.kern_idx, input->data[indices.in_idx] * d_output->data[indices.out_idx] / n_vals_w);
atomicAdd(d_bias->data + indices.bias_idx, d_output->data[indices.out_idx] / n_vals_b);
atomicAdd(d_input->data + indices.in_idx, d_output->data[indices.out_idx] * weight->data[indices.kern_idx] / n_vals_w);
#if CONV_BACK_PRINT_DEBUG
printf("Width: %i, height: %i, in idx: %i, out idx: %i, w_idx: %i, b_idx: %i\n", in_width, in_height, in_idx, out_idx, w_idx, b_idx);
printf("Input: %2.3f, output grad: %2.3f, values affected by W: %i, values affected by B: %i\n", input->data[in_idx], d_output->data[out_idx], n_vals_w, n_vals_b);
printf("New dw: %2.5f, new db: %2.5f, w_idx: %i, b_idx: %i\n", d_weight->data[w_idx], d_bias->data[b_idx], w_idx, b_idx);
printf("Sens: %2.5f, weight: %2.3f, grad: %2.3f, in_idx: %i, out_idx: %i, w_idx: %i\n", d_input->data[in_idx], weight->data[w_idx], d_output->data[out_idx], in_idx, out_idx, w_idx);
#endif
}
Conv2d::Conv2d(const int ch_in, const int ch_out, const int width, const int height) : weight(0), bias(0) {
limits.channel = 1;
limits.example = 1;
limits.x_in = 1;
limits.y_in = 1;
limits.x_out = 1;
limits.y_out = 1;
limits.channel = 4;
limits.example = 16;
limits.x_in = 8;
limits.y_in = 8;
limits.x_out = 16;
limits.y_out = 16;
vector<int> weight_shape = { ch_in, ch_out, width, height };
vector<int> bias_shape = { ch_out };
vector<float> weight_data(ch_in * ch_out * width * height, 0);
vector<float> bias_data(ch_out, 0);
weight = new Tensor(weight_shape, &weight_data[0]);
bias = new Tensor(bias_shape, &bias_data[0]);
}
Conv2d::~Conv2d() {
if (weight) {
delete weight;
}
if (bias) {
delete bias;
}
}
void Conv2d::checkShapes(vector<int> input_shape, vector<int> output_shape, vector<int> weight_shape) {
int exp_out_width = input_shape[2] - 2 * (weight_shape[2] / 2);
int exp_out_height = input_shape[3] - 2 * (weight_shape[3] / 2);
int exp_out_channels = weight_shape[1];
int exp_in_channels = weight_shape[0];
if (input_shape[1] != exp_in_channels)
throw TensorShapeError();
if (output_shape[1] != exp_out_channels)
throw TensorShapeError();
if (exp_out_width <= 0 || output_shape[2] != exp_out_width)
throw TensorShapeError();
if (exp_out_height <= 0 || output_shape[3] != exp_out_height)
throw TensorShapeError();
}
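// Added example: with no padding, a {1, ch_in, 32, 32} input and a 5x5 kernel
// give exp_out_width = exp_out_height = 32 - 2*(5/2) = 28, so the output must
// be shaped {1, ch_out, 28, 28}.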
void Conv2d::run(Tensor* output, Tensor* input, Tensor* _) {
record_flow(output, input);
output->clear();
vector<int> input_shape = input->getShape();
vector<int> output_shape = output->getShape();
vector<int> weight_shape = weight->getShape();
// throws TensorShapeError
checkShapes(input_shape, output_shape, weight_shape);
ConvOffset offset = { 0 };
ConvOffset* d_offset;
HE(hipMalloc((void**)&(d_offset), sizeof(ConvOffset)));
RUN_CONV(convolve, offset, limits, weight_shape, output_shape, (
input->getCudaData(),
output->getCudaData(),
weight->getCudaData(),
bias->getCudaData(),
d_offset
)
);
auto limits_local = limits;
limits_local.x_in = 1;
limits_local.channel = weight_shape[1];
weight_shape[0] = 1;
weight_shape[2] = 1;
weight_shape[3] = 1;
RUN_CONV(add_bias, offset, limits_local, weight_shape, output_shape, (
input->getCudaData(),
output->getCudaData(),
weight->getCudaData(),
bias->getCudaData(),
d_offset
)
);
}
void Conv2d::update(float lr) {
//
}
void Conv2d::propagate() {
// Out = f(Wx + b)
// dOut/dW = df/d(Wx + b) * x
weight->clear(true);
bias->clear(true);
vector<int> input_shape = flow_input1->getShape();
vector<int> output_shape = flow_output->getShape();
vector<int> weight_shape = weight->getShape();
ConvOffset offset = { 0 };
ConvOffset* d_offset;
HE(hipMalloc((void**)&(d_offset), sizeof(ConvOffset)));
RUN_CONV(convolve_backward, offset, limits, weight_shape, output_shape, (
flow_input1->getCudaData(),
flow_input1->getCudaGrad(),
flow_output->getCudaGrad(),
weight->getCudaData(),
bias->getCudaData(),
weight->getCudaGrad(),
bias->getCudaGrad(),
d_offset)
);
}
Conv1d::Conv1d(const int ch_in, const int ch_out, const int width) : Conv2d(ch_in, ch_out, width, 1) {
}
void Conv1d::run(Tensor* output, Tensor* input, Tensor* _) {
Conv2d::run(output->unsqueeze(), input->unsqueeze());
output->squeeze();
input->squeeze();
}
void Conv1d::propagate() {
flow_input1->unsqueeze();
flow_output->unsqueeze();
Conv2d::propagate();
flow_input1->squeeze();
flow_output->squeeze();
}
Conv1d::~Conv1d() {
//if (weight) {
// delete weight;
//}
//if (bias) {
// delete bias;
//}
}
| 37ae57cef20edfd6afb7e11b827cd1421269e22a.cu | #include "Conv.h"
#include "../Common.cuh"
#include <iostream>
#define RUN_CONV(function, offset, limits, weight_shape, output_shape, parameters) \
for (offset.example = 0; offset.example < output_shape[0]; offset.example += limits.example) { \
for (offset.channel = 0; offset.channel < weight_shape[0] * weight_shape[1]; offset.channel += limits.channel) { \
for (offset.x_out = 0; offset.x_out < output_shape[2]; offset.x_out += limits.x_out) { \
for (offset.y_out = 0; offset.y_out < output_shape[3]; offset.y_out += limits.y_out) { \
for (offset.x_in = 0; offset.x_in < weight_shape[2]; offset.x_in += limits.x_in) { \
for (offset.y_in = 0; offset.y_in < weight_shape[3]; offset.y_in += limits.y_in) { \
int grid_x = min(output_shape[2] - offset.x_out, limits.x_out); \
int grid_y = min(output_shape[3] - offset.y_out, limits.y_out); \
int grid_z = min(output_shape[0] - offset.example, limits.example); \
dim3 grid(grid_x, grid_y, grid_z); \
int block_x = min(weight_shape[2] - offset.x_in, limits.x_in); \
int block_y = min(weight_shape[3] - offset.y_in, limits.y_in); \
int block_z = min(weight_shape[0] * weight_shape[1] - offset.channel, limits.channel);\
dim3 block(block_x, block_y, block_z); \
HE(cudaMemcpy(d_offset, &offset, sizeof(ConvOffset), cudaMemcpyHostToDevice)); \
function <<<grid, block>>> parameters; \
HE(cudaPeekAtLastError()); \
} \
} \
} \
} \
} \
    }
#define CONV_PRINT_DEBUG false
#define CONV_BACK_PRINT_DEBUG false
using namespace std;
extern cudaError_t cudaStatus;
__device__ ConvInfo get_indices(CUDATensor* input, CUDATensor* output, CUDATensor* weight, ConvOffset* offset) {
ConvInfo result = { 0 };
int in_width = 1;
int in_height = 1;
result.out_width = 1;
result.out_height = 1;
result.n_examples = input->shape[0];
result.n_channels_in = weight->shape[0];
result.n_channels_out = weight->shape[1];
if (input->dims == 4) {
in_width = input->shape[input->dims - 2];
in_height = input->shape[input->dims - 1];
result.out_width = output->shape[output->dims - 2];
result.out_height = output->shape[output->dims - 1];
}
else {
in_width = input->shape[input->dims - 1];
result.out_width = output->shape[output->dims - 1];
}
int x_out = offset->x_out + blockIdx.x;
int y_out = offset->y_out + blockIdx.y;
int x_in = offset->x_in + threadIdx.x;
int y_in = offset->y_in + threadIdx.y;
int channel = offset->channel + threadIdx.z;
int ch_in = channel / result.n_channels_out;
int ch_out = channel % result.n_channels_out;
int example = offset->example + blockIdx.z;
result.in_idx = example * result.n_channels_in * in_width * in_height + // example
ch_in * in_height * in_width + // in channel
(y_out + y_in) * in_width + // height
x_out + x_in; // width
result.kern_idx = ch_in * weight->shape[2] * weight->shape[3] + // in channel
ch_out * weight->shape[0] * weight->shape[2] * weight->shape[3] + // out channel
y_in * weight->shape[3] + // height
x_in; // width
result.out_idx = example * result.n_channels_out * result.out_width * result.out_height + // example
ch_out * result.out_width * result.out_height + // out channel
y_out * result.out_width + // height
x_out; // width
result.bias_idx = ch_out;
return result;
}
__global__ void convolve(CUDATensor* input, CUDATensor* output, CUDATensor* weight, CUDATensor* bias, ConvOffset* offset) {
ConvInfo indices = get_indices(input, output, weight, offset);
#if CONV_PRINT_DEBUG
if (indices.out_idx == 0) {
//printf("Out idx: %i, in idx: %i, weight idx: %i, weight: %2.5f, dim_x: %i, dim_y: %i, dim_z: %i, channel: %i\n",
// indices.out_idx, indices.in_idx, indices.kern_idx, weight->data[indices.kern_idx], threadIdx.x, threadIdx.y, threadIdx.z, offset->ch_in * weight->shape[1] + offset->ch_out + threadIdx.z);
printf("In idx: %i, block_x: %i, block_y: %i, block_z: %i, thread_x: %i, thread_y: %i, thread_z: %i\n",
indices.in_idx, blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z);
//printf("In idx: %i, offset_ch_in: %i, offset_ch_out: %i\n",
// indices.in_idx, offset->ch_in, offset->ch_out);
}
#endif
atomicAdd(output->data + indices.out_idx, input->data[indices.in_idx] * weight->data[indices.kern_idx]);
//#if CONV_PRINT_DEBUG
// printf("Input: %2.5f, weight: %2.5f, output: %2.5f\n",
// input->data[indices.in_idx], weight->data[indices.kern_idx], output->data[indices.out_idx]);
//#endif
}
__global__ void add_bias(CUDATensor* input, CUDATensor* output, CUDATensor* weight, CUDATensor* bias, ConvOffset* offset) {
ConvInfo indices = get_indices(input, output, weight, offset);
atomicAdd(output->data + indices.out_idx, bias->data[indices.bias_idx]);
#if CONV_PRINT_DEBUG
printf("BIAS:\n");
printf("Out idx: %i, bias idx: %i, output: %2.3f\n", indices.out_idx, indices.bias_idx, output->data[indices.out_idx]);
#endif
}
__global__ void convolve_backward(CUDATensor* input, CUDATensor* d_input, CUDATensor* d_output,
CUDATensor* weight, CUDATensor* bias,
CUDATensor* d_weight, CUDATensor* d_bias,
ConvOffset* offset) {
ConvInfo indices = get_indices(input, d_output, weight, offset);
int n_vals_w = indices.out_width * indices.out_height * indices.n_examples * indices.n_channels_out;
int n_vals_b = indices.out_width * indices.out_height * indices.n_examples * indices.n_channels_out *
indices.n_channels_in * weight->shape[2] * weight->shape[3];
atomicAdd(d_weight->data + indices.kern_idx, input->data[indices.in_idx] * d_output->data[indices.out_idx] / n_vals_w);
atomicAdd(d_bias->data + indices.bias_idx, d_output->data[indices.out_idx] / n_vals_b);
atomicAdd(d_input->data + indices.in_idx, d_output->data[indices.out_idx] * weight->data[indices.kern_idx] / n_vals_w);
#if CONV_BACK_PRINT_DEBUG
printf("Width: %i, height: %i, in idx: %i, out idx: %i, w_idx: %i, b_idx: %i\n", in_width, in_height, in_idx, out_idx, w_idx, b_idx);
printf("Input: %2.3f, output grad: %2.3f, values affected by W: %i, values affected by B: %i\n", input->data[in_idx], d_output->data[out_idx], n_vals_w, n_vals_b);
printf("New dw: %2.5f, new db: %2.5f, w_idx: %i, b_idx: %i\n", d_weight->data[w_idx], d_bias->data[b_idx], w_idx, b_idx);
printf("Sens: %2.5f, weight: %2.3f, grad: %2.3f, in_idx: %i, out_idx: %i, w_idx: %i\n", d_input->data[in_idx], weight->data[w_idx], d_output->data[out_idx], in_idx, out_idx, w_idx);
#endif
}
Conv2d::Conv2d(const int ch_in, const int ch_out, const int width, const int height) : weight(0), bias(0) {
limits.channel = 1;
limits.example = 1;
limits.x_in = 1;
limits.y_in = 1;
limits.x_out = 1;
limits.y_out = 1;
limits.channel = 4;
limits.example = 16;
limits.x_in = 8;
limits.y_in = 8;
limits.x_out = 16;
limits.y_out = 16;
vector<int> weight_shape = { ch_in, ch_out, width, height };
vector<int> bias_shape = { ch_out };
vector<float> weight_data(ch_in * ch_out * width * height, 0);
vector<float> bias_data(ch_out, 0);
weight = new Tensor(weight_shape, &weight_data[0]);
bias = new Tensor(bias_shape, &bias_data[0]);
}
Conv2d::~Conv2d() {
if (weight) {
delete weight;
}
if (bias) {
delete bias;
}
}
void Conv2d::checkShapes(vector<int> input_shape, vector<int> output_shape, vector<int> weight_shape) {
int exp_out_width = input_shape[2] - 2 * (weight_shape[2] / 2);
int exp_out_height = input_shape[3] - 2 * (weight_shape[3] / 2);
int exp_out_channels = weight_shape[1];
int exp_in_channels = weight_shape[0];
if (input_shape[1] != exp_in_channels)
throw TensorShapeError();
if (output_shape[1] != exp_out_channels)
throw TensorShapeError();
if (exp_out_width <= 0 || output_shape[2] != exp_out_width)
throw TensorShapeError();
if (exp_out_height <= 0 || output_shape[3] != exp_out_height)
throw TensorShapeError();
}
void Conv2d::run(Tensor* output, Tensor* input, Tensor* _) {
record_flow(output, input);
output->clear();
vector<int> input_shape = input->getShape();
vector<int> output_shape = output->getShape();
vector<int> weight_shape = weight->getShape();
// throws TensorShapeError
checkShapes(input_shape, output_shape, weight_shape);
ConvOffset offset = { 0 };
ConvOffset* d_offset;
HE(cudaMalloc((void**)&(d_offset), sizeof(ConvOffset)));
RUN_CONV(convolve, offset, limits, weight_shape, output_shape, (
input->getCudaData(),
output->getCudaData(),
weight->getCudaData(),
bias->getCudaData(),
d_offset
)
);
auto limits_local = limits;
limits_local.x_in = 1;
limits_local.channel = weight_shape[1];
weight_shape[0] = 1;
weight_shape[2] = 1;
weight_shape[3] = 1;
RUN_CONV(add_bias, offset, limits_local, weight_shape, output_shape, (
input->getCudaData(),
output->getCudaData(),
weight->getCudaData(),
bias->getCudaData(),
d_offset
)
);
}
void Conv2d::update(float lr) {
//
}
void Conv2d::propagate() {
// Out = f(Wx + b)
// dOut/dW = df/d(Wx + b) * x
weight->clear(true);
bias->clear(true);
vector<int> input_shape = flow_input1->getShape();
vector<int> output_shape = flow_output->getShape();
vector<int> weight_shape = weight->getShape();
ConvOffset offset = { 0 };
ConvOffset* d_offset;
HE(cudaMalloc((void**)&(d_offset), sizeof(ConvOffset)));
RUN_CONV(convolve_backward, offset, limits, weight_shape, output_shape, (
flow_input1->getCudaData(),
flow_input1->getCudaGrad(),
flow_output->getCudaGrad(),
weight->getCudaData(),
bias->getCudaData(),
weight->getCudaGrad(),
bias->getCudaGrad(),
d_offset)
);
}
Conv1d::Conv1d(const int ch_in, const int ch_out, const int width) : Conv2d(ch_in, ch_out, width, 1) {
}
void Conv1d::run(Tensor* output, Tensor* input, Tensor* _) {
Conv2d::run(output->unsqueeze(), input->unsqueeze());
output->squeeze();
input->squeeze();
}
void Conv1d::propagate() {
flow_input1->unsqueeze();
flow_output->unsqueeze();
Conv2d::propagate();
flow_input1->squeeze();
flow_output->squeeze();
}
Conv1d::~Conv1d() {
//if (weight) {
// delete weight;
//}
//if (bias) {
// delete bias;
//}
}
|
e81a54187b3975dbefd0a4b0abeb4e5679117a0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <vector>
#include <hip/hip_cooperative_groups.h>
#include <cutf/memory.hpp>
#include <cutf/device.hpp>
// constexpr constant, usable from CUDA device code
constexpr float rsqrt2 = 1.0f/1.41421356237f;
//
// ATSUKAN?
constexpr std::size_t num_threads_per_block = 1 << 8;
//
using inst_t = uint64_t;
using inst_type_t = uint64_t;
using qubit_t = float;
// instruction packing: one 64-bit word holds three 18-bit instructions
// |53     36|35     18|17      0|
// |  inst 2 |  inst 1 |  inst 0 |
// each 18-bit instruction:
// |17   15|14    10|9      5|4     0|
// |  type | ctrl 1 | ctrl 0 | target|
// instruction type (3 bit)
constexpr inst_type_t inst_type_x = 0x1;
constexpr inst_type_t inst_type_z = 0x2;
constexpr inst_type_t inst_type_h = 0x3;
constexpr inst_type_t inst_type_cx = 0x4;
constexpr inst_type_t inst_type_cz = 0x5;
constexpr inst_type_t inst_type_ccx = 0x6;
// constant memory
__constant__ inst_t instruction_array[5 * 1024];
//
__host__ __device__ void debug_print_inst(const inst_t inst, const std::size_t inst_num = 0){
const auto target_inst = (inst >> (inst_num * 18)) & 0x3ffff;
printf("/*0x%016lx*/ ", inst);
const auto inst_type = target_inst >> 15;
const auto control_1 = (target_inst >> 10) & 0x1f;
const auto control_0 = (target_inst >> 5) & 0x1f;
const auto target = target_inst & 0x1f;
if(inst_type == inst_type_x) printf("X %lu", target);
if(inst_type == inst_type_z) printf("Z %lu", target);
if(inst_type == inst_type_h) printf("H %lu", target);
if(inst_type == inst_type_cx) printf("CX %lu %lu", control_0, target);
if(inst_type == inst_type_cz) printf("CZ %lu %lu", control_0, target);
if(inst_type == inst_type_ccx) printf("CCX %lu %lu %lu", control_1, control_0, target);
printf("\n");
}
void debug_print_insts(const inst_t* const insts, const std::size_t num_insts){
printf("loaded instructions\n");
printf("line /* hex inst code */ | decoded code |\n");
printf("--------------------------------------------\n");
for(std::size_t i = 0; i < num_insts; i++){
printf("%4lu ", i);
debug_print_inst(insts[i/3], 2 - i%3);
}
}
inst_t make_inst(const inst_type_t type, const std::size_t control_1, const std::size_t control_0, const std::size_t target){
return (type << 15) | (control_1 << 10) | (control_0 << 5) | target;
}
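// Added worked example (illustrative): the program "H 0", "CX 0 1", "X 1" encodes as
//   make_inst(inst_type_h, 0, 0, 0) == 0x18000
//   make_inst(inst_type_cx, 0, 0, 1) == 0x20001
//   make_inst(inst_type_x, 0, 0, 1) == 0x8001
// and the packing loop in main() places them at bits 53..36, 35..18 and 17..0,
// so insts[0] == 0x0018000800048001.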
// Provided
__device__ float warpReduceMax(float val){
for (int offset = warpSize/2; offset > 0; offset /= 2)
#if __CUDACC_VER_MAJOR__ >= 9
val = fmaxf(val, __shfl_down_sync(~0, val, offset));
#else
val = fmaxf(val, __shfl_down(val, offset));
#endif
return val;
}
__global__ void maxabs(float *A, float *m){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int lane = threadIdx.x % warpSize;
float val = fabsf(A[i]);
val = warpReduceMax(val);
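	// Added note: atomicMax has no float overload, so the bits are reinterpreted
	// as int; this is safe here because val = fabsf(A[i]) >= 0 and non-negative
	// IEEE-754 floats order the same way as their integer bit patterns.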
if(lane == 0) atomicMax((int *) m, *(int *) &val);
}
__device__ void init_qubits(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
qubits[index] = static_cast<qubit_t>(0);
}
if(tid == 0) qubits[0] = static_cast<qubit_t>(1);
}
__device__ void convert_x(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
		// NOTE: read both amplitudes before writing (for validation)
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
qubits[i0] = p1;
qubits[i1] = p0;
}
}
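// Added example: for target_bits == 0b010 the loop maps index 0,1,2,3 to the
// amplitude pairs (i0, i1) = (0,2), (1,3), (4,6), (5,7); i0 always has the
// target bit cleared and i1 = i0 ^ target_bits has it set.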
__device__ void convert_z(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if((i0 & target_bits) != 0)
qubits[i0] = -qubits[i0];
else
qubits[i1] = -qubits[i1];
}
}
__device__ void convert_h(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
		// NOTE: read both amplitudes before writing (for validation)
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
if((i0 & target_bits) == 0){
qubits[i0] = (p0 + p1) * rsqrt2;
qubits[i1] = (p0 - p1) * rsqrt2;
}else{
qubits[i0] = (p1 - p0) * rsqrt2;
qubits[i1] = (p1 + p0) * rsqrt2;
}
}
}
__device__ void convert_cx(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t ctrl_bits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if((i0 & ctrl_bits) == 0){
continue;
}
		// NOTE: read both amplitudes before writing (for validation)
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
qubits[i0] = p1;
qubits[i1] = p0;
}
}
__device__ void convert_cz(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t ctrl_bits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if((i0 & ctrl_bits) == 0){
continue;
}
if((i0 & target_bits) != 0)
qubits[i0] = -qubits[i0];
else
qubits[i1] = -qubits[i1];
}
}
__device__ void convert_ccx(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t ctrl_bits_0, const std::size_t ctrl_bits_1, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if(((i0 & ctrl_bits_0) == 0) || ((i0 & ctrl_bits_1) == 0)){
continue;
}
		// NOTE: read both amplitudes before writing (for validation)
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
qubits[i0] = p1;
qubits[i1] = p0;
}
}
__global__ void qusimu_kernel(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t num_insts, const std::size_t num_all_threads){
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
//
init_qubits(qubits, num_qubits, tid, num_all_threads);
	// grid-wide group covering all launched threads (used for synchronization)
const auto all_threads_group = cooperative_groups::this_grid();
//
for(std::size_t inst_index = 0; inst_index < num_insts/3; inst_index++){
const auto packed_inst = instruction_array[inst_index];
		// up to 3 instructions are packed in each 64-bit word
#pragma unroll
for(std::size_t packed_inst_index = 0; packed_inst_index < 3 ; packed_inst_index++){
if((inst_index * 3 + packed_inst_index) >= num_insts) return;
all_threads_group.sync();
			// extract the packed_inst_index-th 18-bit instruction (the first instruction occupies the highest slot)
const auto inst = (packed_inst >> (18 * (2 - packed_inst_index))) & 0x3ffff;
//if(tid == 0) debug_print_inst(inst, 0); // 0
//continue;
			// bits |17 15| hold the instruction type, hence the magic number 15
const auto inst_type = static_cast<inst_type_t>(inst >> 15);
const auto target_bits = static_cast<inst_t>(1) << (inst & 0x1f);
// X
if(inst_type == inst_type_x){
convert_x(qubits, num_qubits, target_bits, tid, num_all_threads);
continue;
}
// Z
if(inst_type == inst_type_z){
convert_z(qubits, num_qubits, target_bits, tid, num_all_threads);
continue;
}
// H
if(inst_type == inst_type_h){
convert_h(qubits, num_qubits, target_bits, tid, num_all_threads);
continue;
}
const auto ctrl_bits_0 = static_cast<inst_t>(1) << ((inst >> 5) & 0x1f);
// CX
if(inst_type == inst_type_cx){
convert_cx(qubits, num_qubits, ctrl_bits_0, target_bits, tid, num_all_threads);
continue;
}
// CZ
if(inst_type == inst_type_cz){
convert_cz(qubits, num_qubits, ctrl_bits_0, target_bits, tid, num_all_threads);
continue;
}
const auto ctrl_bits_1 = static_cast<inst_t>(1) << ((inst >> 10) & 0x1f);
// CCX
if(inst_type == inst_type_ccx){
convert_ccx(qubits, num_qubits, ctrl_bits_0, ctrl_bits_1, target_bits, tid, num_all_threads);
continue;
}
}
}
// sync all test
/*for(std::size_t i = 0; i < num_all_threads; i++){
all_threads_group.sync();
if(i == tid){
printf("%lu \n", i);
}
}*/
}
int main(){
std::size_t n, num_insts;
std::cin >> n >> num_insts;
	// total number of qubit basis states
const std::size_t N = 1 << n;
	// qubits on device memory
auto d_qubits_uptr = cutf::cuda::memory::get_device_unique_ptr<qubit_t>(N);
	// issued instruction sequence
	inst_t insts[5 * 1024] = {}; // zero-initialize, the instructions below are OR-ed in
std::size_t inst_index = 0;
	// read the input
std::size_t k_index = 0;
for(; k_index < num_insts; k_index++){
char gate[4];
		// read the gate type
std::scanf("%s", gate);
		// parse
if(gate[0] == 'X' && gate[1] == '\0'){
std::size_t target;
std::scanf("%lu", &target);
insts[inst_index] |= make_inst(inst_type_x, 0, 0, target);
}else if(gate[0] == 'Z' && gate[1] == '\0'){
std::size_t target;
std::scanf("%lu", &target);
insts[inst_index] |= make_inst(inst_type_z, 0, 0, target);
}else if(gate[0] == 'H' && gate[1] == '\0'){
std::size_t target;
std::scanf("%lu", &target);
insts[inst_index] |= make_inst(inst_type_h, 0, 0, target);
}else if(gate[0] == 'C' && gate[1] == 'X' && gate[2] == '\0'){
std::size_t target, ctrl;
std::scanf("%lu%lu", &ctrl, &target);
insts[inst_index] |= make_inst(inst_type_cx, 0, ctrl, target);
}else if(gate[0] == 'C' && gate[1] == 'Z' && gate[2] == '\0'){
std::size_t target, ctrl;
std::scanf("%lu%lu", &ctrl, &target);
insts[inst_index] |= make_inst(inst_type_cz, 0, ctrl, target);
}else if(gate[0] == 'C' && gate[1] == 'C' && gate[2] == 'X' && gate[3] == '\0'){
std::size_t target, ctrl_0, ctrl_1;
std::scanf("%lu%lu%lu", &ctrl_1, &ctrl_0, &target);
insts[inst_index] |= make_inst(inst_type_ccx, ctrl_1, ctrl_0, target);
}
if(k_index % 3 == 2){
inst_index++;
}else{
insts[inst_index] <<= 18;
}
}
	// inst_index++ is done inside the loop above, so changing the (fixed) array size carries a real risk of out-of-range access;
	// the leftover shift for a partially filled bundle is therefore handled here in advance
if(k_index % 3 == 0){
insts[inst_index] <<= 36;
}else if(k_index % 3 == 1){
insts[inst_index] <<= 18;
}
	// instruction array on device memory
	// TODO : would really like to put this in constant memory
cutf::cuda::error::check(hipMemcpyToSymbol(instruction_array, insts, 5 * 1024 * sizeof(inst_t)), __FILE__, __LINE__, __func__);
	// get the number of blocks that maximizes occupancy
const auto device_list = cutf::cuda::device::get_properties_vector();
int num_blocks_0 = device_list[0].multiProcessorCount;
int num_blocks_1;
cutf::cuda::error::check(hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_1, qusimu_kernel, num_threads_per_block, 0), __FILE__, __LINE__, __func__);
int num_blocks = num_blocks_0 * num_blocks_1;
std::cout<<"Grid size : "<<num_blocks<<std::endl;
std::cout<<"Block size : "<<num_threads_per_block<<std::endl;
const std::size_t num_all_threads = num_blocks * num_threads_per_block;
	// this_grid() from cooperative_groups is used, so the kernel is launched manually
const dim3 grid(num_blocks);
const dim3 block(num_threads_per_block);
const auto d_qubits_ptr = d_qubits_uptr.get();
const void* args[] = {
reinterpret_cast<void* const*>(&d_qubits_ptr),
reinterpret_cast<const void*>(&N),
reinterpret_cast<const void*>(&num_insts),
reinterpret_cast<const void*>(&num_all_threads),
nullptr
};
cutf::cuda::error::check(hipLaunchCooperativeKernel(reinterpret_cast<void*>(qusimu_kernel), grid, block, (void**)args), __FILE__, __LINE__, __func__);
hipDeviceSynchronize();
	// pick out the largest amplitude
/*
auto d_max_uptr = cutf::cuda::memory::get_device_unique_ptr<qubit_t>(1);
float h_max = 0.0f;
cutf::cuda::memory::copy(d_max_uptr.get(), &h_max, 1);
maxabs<<<(N + num_threads_per_block - 1)/num_threads_per_block, num_threads_per_block>>>(d_qubits_uptr.get(), d_max_uptr.get());
cutf::cuda::memory::copy(&h_max, d_max_uptr.get(), 1);
printf("%e\n", h_max * h_max);
*/
auto h_qubits_uptr = cutf::cuda::memory::get_host_unique_ptr<qubit_t>(N);
cutf::cuda::memory::copy(h_qubits_uptr.get(), d_qubits_uptr.get(), N);
std::size_t max_i;
qubit_t max_p = 0;
for(std::size_t i = 0; i < N; i++){
//printf("[%8lu] : %.8f\n", i, h_qubits_uptr.get()[i]);
const auto p = h_qubits_uptr.get()[i];
if(p * p > max_p * max_p){
max_p = p;
max_i = i;
}
}
printf("%lu\n%.8e\n", max_i, (max_p * max_p));
}
| e81a54187b3975dbefd0a4b0abeb4e5679117a0d.cu | #include <iostream>
#include <string>
#include <vector>
#include <cooperative_groups.h>
#include <cutf/memory.hpp>
#include <cutf/device.hpp>
// CUDA built-in functions are not constexpr, so define this constant ourselves
constexpr float rsqrt2 = 1.0f/1.41421356237f;
// number of threads per block
// maybe worth running ATSUKAN to tune this?
constexpr std::size_t num_threads_per_block = 1 << 8;
// instructions are fixed-length
using inst_t = uint64_t;
using inst_type_t = uint64_t;
using qubit_t = float;
// instruction bundle (three 18-bit instructions packed into one 64-bit word)
// |53        |35        |17        |
// |  inst 2  |  inst 1  |  inst 0  |
// instruction (18 bit)
// |17    15|14       10|9         5|4       0|
// |  type  | control 1 | control 0 | target  |
// instruction type (3 bit)
constexpr inst_type_t inst_type_x = 0x1;
constexpr inst_type_t inst_type_z = 0x2;
constexpr inst_type_t inst_type_h = 0x3;
constexpr inst_type_t inst_type_cx = 0x4;
constexpr inst_type_t inst_type_cz = 0x5;
constexpr inst_type_t inst_type_ccx = 0x6;
// the instructions are kept in constant memory
__constant__ inst_t instruction_array[5 * 1024];
// for debugging
__host__ __device__ void debug_print_inst(const inst_t inst, const std::size_t inst_num = 0){
const auto target_inst = (inst >> (inst_num * 18)) & 0x3ffff;
printf("/*0x%016lx*/ ", inst);
const auto inst_type = target_inst >> 15;
const auto control_1 = (target_inst >> 10) & 0x1f;
const auto control_0 = (target_inst >> 5) & 0x1f;
const auto target = target_inst & 0x1f;
if(inst_type == inst_type_x) printf("X %lu", target);
if(inst_type == inst_type_z) printf("Z %lu", target);
if(inst_type == inst_type_h) printf("H %lu", target);
if(inst_type == inst_type_cx) printf("CX %lu %lu", control_0, target);
if(inst_type == inst_type_cz) printf("CZ %lu %lu", control_0, target);
if(inst_type == inst_type_ccx) printf("CCX %lu %lu %lu", control_1, control_0, target);
printf("\n");
}
void debug_print_insts(const inst_t* const insts, const std::size_t num_insts){
printf("loaded instructions\n");
printf("line /* hex inst code */ | decoded code |\n");
printf("--------------------------------------------\n");
for(std::size_t i = 0; i < num_insts; i++){
printf("%4lu ", i);
debug_print_inst(insts[i/3], 2 - i%3);
}
}
inst_t make_inst(const inst_type_t type, const std::size_t control_1, const std::size_t control_0, const std::size_t target){
return (type << 15) | (control_1 << 10) | (control_0 << 5) | target;
}
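// e.g. make_inst(inst_type_h, 0, 0, 3) == (0x3 << 15) | 3 == 0x18003, encoding an H gate on qubit 3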
// Provided
__device__ float warpReduceMax(float val){
for (int offset = warpSize/2; offset > 0; offset /= 2)
#if __CUDACC_VER_MAJOR__ >= 9
val = fmaxf(val, __shfl_down_sync(~0, val, offset));
#else
val = fmaxf(val, __shfl_down(val, offset));
#endif
return val;
}
__global__ void maxabs(float *A, float *m){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int lane = threadIdx.x % warpSize;
float val = fabsf(A[i]);
val = warpReduceMax(val);
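	// integer atomicMax on the bit pattern is safe here: val comes from fabsf() and is non-negative,
	// and IEEE-754 ordering agrees with signed-integer ordering for non-negative floats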
if(lane == 0) atomicMax((int *) m, *(int *) &val);
}
__device__ void init_qubits(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
qubits[index] = static_cast<qubit_t>(0);
}
if(tid == 0) qubits[0] = static_cast<qubit_t>(1);
}
__device__ void convert_x(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
		// NOTE: validation happens in the for-loop condition, so it is omitted here
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
qubits[i0] = p1;
qubits[i1] = p0;
}
}
__device__ void convert_z(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if((i0 & target_bits) != 0)
qubits[i0] = -qubits[i0];
else
qubits[i1] = -qubits[i1];
}
}
__device__ void convert_h(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
		// NOTE: validation happens in the for-loop condition, so it is omitted here
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
if((i0 & target_bits) == 0){
qubits[i0] = (p0 + p1) * rsqrt2;
qubits[i1] = (p0 - p1) * rsqrt2;
}else{
qubits[i0] = (p1 - p0) * rsqrt2;
qubits[i1] = (p1 + p0) * rsqrt2;
}
}
}
__device__ void convert_cx(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t ctrl_bits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if((i0 & ctrl_bits) == 0){
continue;
}
		// NOTE: validation happens in the for-loop condition, so it is omitted here
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
qubits[i0] = p1;
qubits[i1] = p0;
}
}
__device__ void convert_cz(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t ctrl_bits, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if((i0 & ctrl_bits) == 0){
continue;
}
if((i0 & target_bits) != 0)
qubits[i0] = -qubits[i0];
else
qubits[i1] = -qubits[i1];
}
}
__device__ void convert_ccx(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t ctrl_bits_0, const std::size_t ctrl_bits_1, const std::size_t target_bits, const std::size_t tid, const std::size_t num_all_threads){
for(std::size_t i = 0, index; (index = i + tid) < (num_qubits >> 1); i+= num_all_threads){
const auto i0 = (index / target_bits) * (target_bits << 1) + (index % target_bits);
const auto i1 = i0 ^ target_bits;
if(((i0 & ctrl_bits_0) == 0) || ((i0 & ctrl_bits_1) == 0)){
continue;
}
		// NOTE: validation happens in the for-loop condition, so it is omitted here
const auto p0 = qubits[i0];
const auto p1 = qubits[i1];
qubits[i0] = p1;
qubits[i1] = p0;
}
}
__global__ void qusimu_kernel(qubit_t* const qubits, const std::size_t num_qubits, const std::size_t num_insts, const std::size_t num_all_threads){
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
	// initialization
init_qubits(qubits, num_qubits, tid, num_all_threads);
	// form a group spanning all threads in the grid
const auto all_threads_group = cooperative_groups::this_grid();
	// instruction execution loop
for(std::size_t inst_index = 0; inst_index < num_insts/3; inst_index++){
const auto packed_inst = instruction_array[inst_index];
		// which of the 3 packed instructions to use
#pragma unroll
for(std::size_t packed_inst_index = 0; packed_inst_index < 3 ; packed_inst_index++){
if((inst_index * 3 + packed_inst_index) >= num_insts) return;
all_threads_group.sync();
			// decode
			// extract the packed_inst_index-th instruction
const auto inst = (packed_inst >> (18 * (2 - packed_inst_index))) & 0x3ffff;
			//if(tid == 0) debug_print_inst(inst, 0); // always slot 0, since the instruction was already extracted
//continue;
			// bits |17 15| hold the instruction type, hence the magic number 15
const auto inst_type = static_cast<inst_type_t>(inst >> 15);
const auto target_bits = static_cast<inst_t>(1) << (inst & 0x1f);
// X
if(inst_type == inst_type_x){
convert_x(qubits, num_qubits, target_bits, tid, num_all_threads);
continue;
}
// Z
if(inst_type == inst_type_z){
convert_z(qubits, num_qubits, target_bits, tid, num_all_threads);
continue;
}
// H
if(inst_type == inst_type_h){
convert_h(qubits, num_qubits, target_bits, tid, num_all_threads);
continue;
}
const auto ctrl_bits_0 = static_cast<inst_t>(1) << ((inst >> 5) & 0x1f);
// CX
if(inst_type == inst_type_cx){
convert_cx(qubits, num_qubits, ctrl_bits_0, target_bits, tid, num_all_threads);
continue;
}
// CZ
if(inst_type == inst_type_cz){
convert_cz(qubits, num_qubits, ctrl_bits_0, target_bits, tid, num_all_threads);
continue;
}
const auto ctrl_bits_1 = static_cast<inst_t>(1) << ((inst >> 10) & 0x1f);
// CCX
if(inst_type == inst_type_ccx){
convert_ccx(qubits, num_qubits, ctrl_bits_0, ctrl_bits_1, target_bits, tid, num_all_threads);
continue;
}
}
}
// sync all test
/*for(std::size_t i = 0; i < num_all_threads; i++){
all_threads_group.sync();
if(i == tid){
printf("%lu \n", i);
}
}*/
}
int main(){
std::size_t n, num_insts;
std::cin >> n >> num_insts;
	// total number of qubit basis states
const std::size_t N = 1 << n;
	// qubits on device memory
auto d_qubits_uptr = cutf::cuda::memory::get_device_unique_ptr<qubit_t>(N);
	// issued instruction sequence
	inst_t insts[5 * 1024] = {}; // zero-initialize, the instructions below are OR-ed in
std::size_t inst_index = 0;
	// read the input
std::size_t k_index = 0;
for(; k_index < num_insts; k_index++){
char gate[4];
		// read the gate type
std::scanf("%s", gate);
		// parse
if(gate[0] == 'X' && gate[1] == '\0'){
std::size_t target;
std::scanf("%lu", &target);
insts[inst_index] |= make_inst(inst_type_x, 0, 0, target);
}else if(gate[0] == 'Z' && gate[1] == '\0'){
std::size_t target;
std::scanf("%lu", &target);
insts[inst_index] |= make_inst(inst_type_z, 0, 0, target);
}else if(gate[0] == 'H' && gate[1] == '\0'){
std::size_t target;
std::scanf("%lu", &target);
insts[inst_index] |= make_inst(inst_type_h, 0, 0, target);
}else if(gate[0] == 'C' && gate[1] == 'X' && gate[2] == '\0'){
std::size_t target, ctrl;
std::scanf("%lu%lu", &ctrl, &target);
insts[inst_index] |= make_inst(inst_type_cx, 0, ctrl, target);
}else if(gate[0] == 'C' && gate[1] == 'Z' && gate[2] == '\0'){
std::size_t target, ctrl;
std::scanf("%lu%lu", &ctrl, &target);
insts[inst_index] |= make_inst(inst_type_cz, 0, ctrl, target);
}else if(gate[0] == 'C' && gate[1] == 'C' && gate[2] == 'X' && gate[3] == '\0'){
std::size_t target, ctrl_0, ctrl_1;
std::scanf("%lu%lu%lu", &ctrl_1, &ctrl_0, &target);
insts[inst_index] |= make_inst(inst_type_ccx, ctrl_1, ctrl_0, target);
}
if(k_index % 3 == 2){
inst_index++;
}else{
insts[inst_index] <<= 18;
}
}
	// inst_index++ is done inside the loop above, so changing the (fixed) array size carries a real risk of out-of-range access;
	// the leftover shift for a partially filled bundle is therefore handled here in advance
if(k_index % 3 == 0){
insts[inst_index] <<= 36;
}else if(k_index % 3 == 1){
insts[inst_index] <<= 18;
}
	// instruction array on device memory
	// TODO : would really like to put this in constant memory
cutf::cuda::error::check(cudaMemcpyToSymbol(instruction_array, insts, 5 * 1024 * sizeof(inst_t)), __FILE__, __LINE__, __func__);
	// get the number of blocks that maximizes occupancy
const auto device_list = cutf::cuda::device::get_properties_vector();
int num_blocks_0 = device_list[0].multiProcessorCount;
int num_blocks_1;
cutf::cuda::error::check(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks_1, qusimu_kernel, num_threads_per_block, 0), __FILE__, __LINE__, __func__);
int num_blocks = num_blocks_0 * num_blocks_1;
std::cout<<"Grid size : "<<num_blocks<<std::endl;
std::cout<<"Block size : "<<num_threads_per_block<<std::endl;
const std::size_t num_all_threads = num_blocks * num_threads_per_block;
	// this_grid() from cooperative_groups is used, so the kernel is launched manually
const dim3 grid(num_blocks);
const dim3 block(num_threads_per_block);
const auto d_qubits_ptr = d_qubits_uptr.get();
const void* args[] = {
reinterpret_cast<void* const*>(&d_qubits_ptr),
reinterpret_cast<const void*>(&N),
reinterpret_cast<const void*>(&num_insts),
reinterpret_cast<const void*>(&num_all_threads),
nullptr
};
cutf::cuda::error::check(cudaLaunchCooperativeKernel(reinterpret_cast<void*>(qusimu_kernel), grid, block, (void**)args), __FILE__, __LINE__, __func__);
cudaDeviceSynchronize();
	// pick out the largest amplitude
/*
auto d_max_uptr = cutf::cuda::memory::get_device_unique_ptr<qubit_t>(1);
float h_max = 0.0f;
cutf::cuda::memory::copy(d_max_uptr.get(), &h_max, 1);
maxabs<<<(N + num_threads_per_block - 1)/num_threads_per_block, num_threads_per_block>>>(d_qubits_uptr.get(), d_max_uptr.get());
cutf::cuda::memory::copy(&h_max, d_max_uptr.get(), 1);
printf("%e\n", h_max * h_max);
*/
auto h_qubits_uptr = cutf::cuda::memory::get_host_unique_ptr<qubit_t>(N);
cutf::cuda::memory::copy(h_qubits_uptr.get(), d_qubits_uptr.get(), N);
std::size_t max_i;
qubit_t max_p = 0;
for(std::size_t i = 0; i < N; i++){
//printf("[%8lu] : %.8f\n", i, h_qubits_uptr.get()[i]);
const auto p = h_qubits_uptr.get()[i];
if(p * p > max_p * max_p){
max_p = p;
max_i = i;
}
}
printf("%lu\n%.8e\n", max_i, (max_p * max_p));
}
|
9270e5d49a6c795286e90074c487fa0eca5bffe7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cmath>
using namespace std;
const int TILE_WIDTH = 16;
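// Tiled matrix multiply: each block computes one TILE_WIDTH x TILE_WIDTH tile of P,
// staging the matching tiles of M and N through shared memory to reduce global-memory traffic.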
__global__ void MatrixMulKernel(int *d_M,int *d_N,int *d_P,int m,int n,int k)
{
__shared__ int ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//Identify the row and column of the Pd element to work on
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int pValue = 0;
	//loop over the Md and Nd tiles required to compute the Pd element
for(int t = 0; t < (n-1) / TILE_WIDTH + 1; ++t)
{
if(row < m && t * TILE_WIDTH + tx < n)
ds_M[ty][tx] = d_M[row * n + t * TILE_WIDTH + tx];
else
ds_M[ty][tx] = 0;
if(col < k && t * TILE_WIDTH + ty < n)
ds_N[ty][tx] = d_N[(t * TILE_WIDTH + ty) * k + col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for(int i = 0; i < TILE_WIDTH; ++i)
pValue += ds_M[ty][i] * ds_N[i][tx];
__syncthreads();
}
if(row < m && col < k)
d_P[row * k + col] = pValue;
}
int main()
{
//freopen("out","w",stdout);
int m = 600, n = 700, k = 1000;
int *h_M, *h_N, *d_M, *d_N;
int *h_P, *d_P;
size_t sizeM = m * n * sizeof(int);
size_t sizeN = n * k * sizeof(int);
size_t sizeP = m * k * sizeof(int);
h_M = (int *) malloc(sizeM);
h_N = (int *) malloc(sizeN);
h_P = (int *) malloc(sizeP);
hipMalloc(&d_M,sizeM);
hipMalloc(&d_N,sizeN);
hipMalloc(&d_P,sizeP);
for(int i = 0; i < m * n; ++i)
{
if(i % 2 == 0)
h_M[i] = 1;
else
h_M[i] = 0;
}
for(int i = 0;i < n * k; ++i)
{
if(i % 2 == 0)
h_N[i] = 0;
else
h_N[i] = 1;
}
hipMemcpy(d_M,h_M,sizeM,hipMemcpyHostToDevice);
hipMemcpy(d_N,h_N,sizeN,hipMemcpyHostToDevice);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
dim3 grid((int)ceil(k*1.0 / TILE_WIDTH), (int)ceil(m*1.0/ TILE_WIDTH));
dim3 block(TILE_WIDTH,TILE_WIDTH);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid),dim3(block), 0, 0, d_M,d_N,d_P,m,n,k);
hipEventRecord(stop,0);
//hipDeviceSynchronize();
hipEventSynchronize(stop);
float ElapsedTime;
hipEventElapsedTime(&ElapsedTime,start,stop);
printf("Kernel Elpased Time: %.3f ms\n",ElapsedTime);
hipMemcpy(h_P,d_P,sizeP,hipMemcpyDeviceToHost);
/*
for(int i = 0; i < m * k; ++i)
printf("%d\n",h_P[i]);
printf("\n");
*/
free(h_P);
free(h_M);
free(h_N);
hipFree(d_P);
hipFree(d_M);
hipFree(d_N);
return 0;
}
| 9270e5d49a6c795286e90074c487fa0eca5bffe7.cu | #include <iostream>
#include <cstdio>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cmath>
using namespace std;
const int TILE_WIDTH = 16;
__global__ void MatrixMulKernel(int *d_M,int *d_N,int *d_P,int m,int n,int k)
{
__shared__ int ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//Identify the row and column of the Pd element to work on
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int pValue = 0;
	//loop over the Md and Nd tiles required to compute the Pd element
for(int t = 0; t < (n-1) / TILE_WIDTH + 1; ++t)
{
if(row < m && t * TILE_WIDTH + tx < n)
ds_M[ty][tx] = d_M[row * n + t * TILE_WIDTH + tx];
else
ds_M[ty][tx] = 0;
if(col < k && t * TILE_WIDTH + ty < n)
ds_N[ty][tx] = d_N[(t * TILE_WIDTH + ty) * k + col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for(int i = 0; i < TILE_WIDTH; ++i)
pValue += ds_M[ty][i] * ds_N[i][tx];
__syncthreads();
}
if(row < m && col < k)
d_P[row * k + col] = pValue;
}
int main()
{
//freopen("out","w",stdout);
int m = 600, n = 700, k = 1000;
int *h_M, *h_N, *d_M, *d_N;
int *h_P, *d_P;
size_t sizeM = m * n * sizeof(int);
size_t sizeN = n * k * sizeof(int);
size_t sizeP = m * k * sizeof(int);
h_M = (int *) malloc(sizeM);
h_N = (int *) malloc(sizeN);
h_P = (int *) malloc(sizeP);
cudaMalloc(&d_M,sizeM);
cudaMalloc(&d_N,sizeN);
cudaMalloc(&d_P,sizeP);
for(int i = 0; i < m * n; ++i)
{
if(i % 2 == 0)
h_M[i] = 1;
else
h_M[i] = 0;
}
for(int i = 0;i < n * k; ++i)
{
if(i % 2 == 0)
h_N[i] = 0;
else
h_N[i] = 1;
}
cudaMemcpy(d_M,h_M,sizeM,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,h_N,sizeN,cudaMemcpyHostToDevice);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
dim3 grid((int)ceil(k*1.0 / TILE_WIDTH), (int)ceil(m*1.0/ TILE_WIDTH));
dim3 block(TILE_WIDTH,TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(d_M,d_N,d_P,m,n,k);
cudaEventRecord(stop,0);
//cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float ElapsedTime;
cudaEventElapsedTime(&ElapsedTime,start,stop);
printf("Kernel Elpased Time: %.3f ms\n",ElapsedTime);
cudaMemcpy(h_P,d_P,sizeP,cudaMemcpyDeviceToHost);
/*
for(int i = 0; i < m * k; ++i)
printf("%d\n",h_P[i]);
printf("\n");
*/
free(h_P);
free(h_M);
free(h_N);
cudaFree(d_P);
cudaFree(d_M);
cudaFree(d_N);
return 0;
}
|
42fcfde8a35dafa8bdcba07eea606ada8461a951.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from 9 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(9), 0, 0, );
CHECK(hipDeviceReset());
return 0;
}
| 42fcfde8a35dafa8bdcba07eea606ada8461a951.cu | #include "../common/common.h"
#include <stdio.h>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from 9 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU!\n");
}
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
helloFromGPU<<<1, 9>>>();
CHECK(cudaDeviceReset());
return 0;
}
|
681cb3cb053b4f047640cfdb0a4d255442b089b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
 * kmeans example: 4 clusters of 4-dimensional points
*/
void initialData(short int *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (short int)( rand() & 0xFFFF );
}
return;
}
/*
nvcc -arch=sm_35 -Xptxas -v kmeans_v2.cu
ptxas info : 0 bytes gmem
ptxas info : Compiling entry function '_Z7kmeans4PsS_PiS0_i' for 'sm_35'
ptxas info : Function properties for _Z7kmeans4PsS_PiS0_i
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 22 registers, 356 bytes cmem[0]
*/
__global__ void kmeans4 (short int *input, short int*centroids, int*newcentroids, int *counter, const int n)
{
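	// one thread per 4-dimensional point (4 consecutive shorts in input); the squared
	// distance to each of the 4 centroids is formed, the nearest centroid is selected, and
	// the point is accumulated into that centroid's running sum and counter with atomics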
int Dim = 4;
int i = (blockIdx.x * blockDim.x + threadIdx.x)*Dim;
if ( i < n ) {
// map
int point_d0 = input[i+0];
int point_d1 = input[i+1];
int point_d2 = input[i+2];
int point_d3 = input[i+3];
int k0_d0 = point_d0 - centroids[0];
int k0_d1 = point_d1 - centroids[1];
int k0_d2 = point_d2 - centroids[2];
int k0_d3 = point_d3 - centroids[3];
int k1_d0 = point_d0 - centroids[4];
int k1_d1 = point_d1 - centroids[5];
int k1_d2 = point_d2 - centroids[6];
int k1_d3 = point_d3 - centroids[7];
int k2_d0 = point_d0 - centroids[8];
int k2_d1 = point_d1 - centroids[9];
int k2_d2 = point_d2 - centroids[10];
int k2_d3 = point_d3 - centroids[11];
int k3_d0 = point_d0 - centroids[12];
int k3_d1 = point_d1 - centroids[13];
int k3_d2 = point_d2 - centroids[14];
int k3_d3 = point_d3 - centroids[15];
k0_d0 *= k0_d0;
k0_d1 *= k0_d1;
k0_d2 *= k0_d2;
k0_d3 *= k0_d3;
k1_d0 *= k1_d0;
k1_d1 *= k1_d1;
k1_d2 *= k1_d2;
k1_d3 *= k1_d3;
k2_d0 *= k2_d0;
k2_d1 *= k2_d1;
k2_d2 *= k2_d2;
k2_d3 *= k2_d3;
k3_d0 *= k3_d0;
k3_d1 *= k3_d1;
k3_d2 *= k3_d2;
k3_d3 *= k3_d3;
// reduce sum
k0_d0 = k0_d0 + k0_d1 + k0_d2 + k0_d3;
k1_d0 = k1_d0 + k1_d1 + k1_d2 + k1_d3;
k2_d0 = k2_d0 + k2_d1 + k2_d2 + k2_d3;
k3_d0 = k3_d0 + k3_d1 + k3_d2 + k3_d3;
// reduce min
int k01 = (k0_d0 < k1_d0 ) ? 0 : 1;
int mink01 = (k0_d0 < k1_d0 ) ? k0_d0: k1_d0 ;
int k23 = (k2_d0 < k3_d0 ) ? 2 : 3;
int mink23 = (k2_d0 < k3_d0 ) ? k2_d0: k3_d0 ;
int k = (mink01 < mink23) ? k01 : k23;
// add current point to new centroids sum
atomicAdd(&(newcentroids[Dim*k]), point_d0);
atomicAdd(&(newcentroids[Dim*k+1]),point_d1);
atomicAdd(&(newcentroids[Dim*k+2]), point_d2);
atomicAdd(&(newcentroids[Dim*k+3]),point_d3);
atomicAdd(&(counter[k]),1);
} // if
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int Dim = 4;
int k = 4;
int nElem = (1 << 21)*Dim;
printf("Vector Size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(short int);
size_t cBytes = Dim * k * sizeof(short int);
size_t sBytes = Dim * k * sizeof(int);
size_t tBytes = k * sizeof(int);
short int *h_data, *h_centroids;
h_data = (short int *)malloc(nBytes);
initialData(h_data,nElem);
h_centroids = (short int *)malloc(cBytes);
//initialData(h_centroids,k*Dim);
h_centroids[0]= 15000;
h_centroids[1]= 15000;
h_centroids[2]= 15000;
h_centroids[3]= 15000;
h_centroids[4]= 5000;
h_centroids[5]= 5000;
h_centroids[6]= 5000;
h_centroids[7]= 5000;
h_centroids[8]= -15000;
h_centroids[9]= -15000;
h_centroids[10]= -15000;
h_centroids[11]= -15000;
h_centroids[12]= -5000;
h_centroids[13]= -5000;
h_centroids[14]= -5000;
h_centroids[15]= -5000;
int *h_newcentroids;
h_newcentroids = (int *)malloc(sBytes);
int *h_counter;
h_counter = (int *)malloc(tBytes);
memset(h_newcentroids, 0, sBytes);
memset(h_counter, 0, tBytes);
// malloc device global memory
short int *d_data, *d_centroids;
CHECK(hipMalloc((short int**)&d_data, nBytes));
CHECK(hipMalloc((short int**)&d_centroids, cBytes));
int *d_newcentroids;
CHECK(hipMalloc((int**)&d_newcentroids, sBytes));
int *d_counter;
CHECK(hipMalloc((int**)&d_counter, tBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_data, h_data, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_centroids, h_centroids, cBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_newcentroids, h_newcentroids, sBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_counter, h_counter, tBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 256;
dim3 block (iLen);
dim3 grid (((nElem/Dim + block.x - 1) / block.x));
for (int i=0;i < 20; i++) {
hipLaunchKernelGGL(( kmeans4), dim3(grid), dim3(block), 0, 0, d_data, d_centroids, d_newcentroids, d_counter, nElem);
CHECK(hipDeviceSynchronize());
printf("kmeans <<< %d, %d >>> \n", grid.x,
block.x);
// check kernel error
CHECK(hipGetLastError()) ;
// copy kernel result back to host side
CHECK(hipMemcpy(h_newcentroids, d_newcentroids, sBytes, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(h_counter, d_counter, tBytes, hipMemcpyDeviceToHost));
int total = nElem/Dim;
for (int j=0; j < k*Dim; j++) {
printf(" centroids dim %d value %d \n", j, h_centroids[j]);
h_centroids[j] = (short int) (h_newcentroids[j]/h_counter[j/4]);
if ( j% 4) printf(" counter j %d = %d \n", j/4, h_counter[j/4]);
if ( j% 4 == 0 )total -= h_counter[j/4];
}
if ( total== 0 ) printf(" ---OK------------ \n");
else printf(" ---total %d------------ \n",total);
memset(h_newcentroids, 0, sBytes);
memset(h_counter, 0, tBytes);
CHECK(hipMemcpy(d_centroids, h_centroids, cBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_newcentroids, h_newcentroids, sBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_counter, h_counter, tBytes, hipMemcpyHostToDevice));
}
// check device results
// free device global memory
CHECK(hipFree(d_data));
CHECK(hipFree(d_centroids));
CHECK(hipFree(d_newcentroids));
CHECK(hipFree(d_counter));
// free host memory
free(h_data);
free(h_centroids);
free(h_newcentroids);
free(h_counter);
return(0);
}
| 681cb3cb053b4f047640cfdb0a4d255442b089b9.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
 * kmeans example: 4 clusters of 4-dimensional points
*/
void initialData(short int *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (short int)( rand() & 0xFFFF );
}
return;
}
/*
nvcc -arch=sm_35 -Xptxas -v kmeans_v2.cu
ptxas info : 0 bytes gmem
ptxas info : Compiling entry function '_Z7kmeans4PsS_PiS0_i' for 'sm_35'
ptxas info : Function properties for _Z7kmeans4PsS_PiS0_i
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 22 registers, 356 bytes cmem[0]
*/
__global__ void kmeans4 (short int *input, short int*centroids, int*newcentroids, int *counter, const int n)
{
int Dim = 4;
int i = (blockIdx.x * blockDim.x + threadIdx.x)*Dim;
if ( i < n ) {
// map
int point_d0 = input[i+0];
int point_d1 = input[i+1];
int point_d2 = input[i+2];
int point_d3 = input[i+3];
int k0_d0 = point_d0 - centroids[0];
int k0_d1 = point_d1 - centroids[1];
int k0_d2 = point_d2 - centroids[2];
int k0_d3 = point_d3 - centroids[3];
int k1_d0 = point_d0 - centroids[4];
int k1_d1 = point_d1 - centroids[5];
int k1_d2 = point_d2 - centroids[6];
int k1_d3 = point_d3 - centroids[7];
int k2_d0 = point_d0 - centroids[8];
int k2_d1 = point_d1 - centroids[9];
int k2_d2 = point_d2 - centroids[10];
int k2_d3 = point_d3 - centroids[11];
int k3_d0 = point_d0 - centroids[12];
int k3_d1 = point_d1 - centroids[13];
int k3_d2 = point_d2 - centroids[14];
int k3_d3 = point_d3 - centroids[15];
k0_d0 *= k0_d0;
k0_d1 *= k0_d1;
k0_d2 *= k0_d2;
k0_d3 *= k0_d3;
k1_d0 *= k1_d0;
k1_d1 *= k1_d1;
k1_d2 *= k1_d2;
k1_d3 *= k1_d3;
k2_d0 *= k2_d0;
k2_d1 *= k2_d1;
k2_d2 *= k2_d2;
k2_d3 *= k2_d3;
k3_d0 *= k3_d0;
k3_d1 *= k3_d1;
k3_d2 *= k3_d2;
k3_d3 *= k3_d3;
// reduce sum
k0_d0 = k0_d0 + k0_d1 + k0_d2 + k0_d3;
k1_d0 = k1_d0 + k1_d1 + k1_d2 + k1_d3;
k2_d0 = k2_d0 + k2_d1 + k2_d2 + k2_d3;
k3_d0 = k3_d0 + k3_d1 + k3_d2 + k3_d3;
// reduce min
int k01 = (k0_d0 < k1_d0 ) ? 0 : 1;
int mink01 = (k0_d0 < k1_d0 ) ? k0_d0: k1_d0 ;
int k23 = (k2_d0 < k3_d0 ) ? 2 : 3;
int mink23 = (k2_d0 < k3_d0 ) ? k2_d0: k3_d0 ;
int k = (mink01 < mink23) ? k01 : k23;
// add current point to new centroids sum
atomicAdd(&(newcentroids[Dim*k]), point_d0);
atomicAdd(&(newcentroids[Dim*k+1]),point_d1);
atomicAdd(&(newcentroids[Dim*k+2]), point_d2);
atomicAdd(&(newcentroids[Dim*k+3]),point_d3);
atomicAdd(&(counter[k]),1);
} // if
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int Dim = 4;
int k = 4;
int nElem = (1 << 21)*Dim;
printf("Vector Size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(short int);
size_t cBytes = Dim * k * sizeof(short int);
size_t sBytes = Dim * k * sizeof(int);
size_t tBytes = k * sizeof(int);
short int *h_data, *h_centroids;
h_data = (short int *)malloc(nBytes);
initialData(h_data,nElem);
h_centroids = (short int *)malloc(cBytes);
//initialData(h_centroids,k*Dim);
h_centroids[0]= 15000;
h_centroids[1]= 15000;
h_centroids[2]= 15000;
h_centroids[3]= 15000;
h_centroids[4]= 5000;
h_centroids[5]= 5000;
h_centroids[6]= 5000;
h_centroids[7]= 5000;
h_centroids[8]= -15000;
h_centroids[9]= -15000;
h_centroids[10]= -15000;
h_centroids[11]= -15000;
h_centroids[12]= -5000;
h_centroids[13]= -5000;
h_centroids[14]= -5000;
h_centroids[15]= -5000;
int *h_newcentroids;
h_newcentroids = (int *)malloc(sBytes);
int *h_counter;
h_counter = (int *)malloc(tBytes);
memset(h_newcentroids, 0, sBytes);
memset(h_counter, 0, tBytes);
// malloc device global memory
short int *d_data, *d_centroids;
CHECK(cudaMalloc((short int**)&d_data, nBytes));
CHECK(cudaMalloc((short int**)&d_centroids, cBytes));
int *d_newcentroids;
CHECK(cudaMalloc((int**)&d_newcentroids, sBytes));
int *d_counter;
CHECK(cudaMalloc((int**)&d_counter, tBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_data, h_data, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_centroids, h_centroids, cBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_newcentroids, h_newcentroids, sBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_counter, h_counter, tBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 256;
dim3 block (iLen);
dim3 grid (((nElem/Dim + block.x - 1) / block.x));
for (int i=0;i < 20; i++) {
kmeans4<<<grid, block>>>(d_data, d_centroids, d_newcentroids, d_counter, nElem);
CHECK(cudaDeviceSynchronize());
printf("kmeans <<< %d, %d >>> \n", grid.x,
block.x);
// check kernel error
CHECK(cudaGetLastError()) ;
// copy kernel result back to host side
CHECK(cudaMemcpy(h_newcentroids, d_newcentroids, sBytes, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_counter, d_counter, tBytes, cudaMemcpyDeviceToHost));
int total = nElem/Dim;
for (int j=0; j < k*Dim; j++) {
printf(" centroids dim %d value %d \n", j, h_centroids[j]);
h_centroids[j] = (short int) (h_newcentroids[j]/h_counter[j/4]);
if ( j% 4) printf(" counter j %d = %d \n", j/4, h_counter[j/4]);
if ( j% 4 == 0 )total -= h_counter[j/4];
}
if ( total== 0 ) printf(" ---OK------------ \n");
else printf(" ---total %d------------ \n",total);
memset(h_newcentroids, 0, sBytes);
memset(h_counter, 0, tBytes);
CHECK(cudaMemcpy(d_centroids, h_centroids, cBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_newcentroids, h_newcentroids, sBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_counter, h_counter, tBytes, cudaMemcpyHostToDevice));
}
// check device results
// free device global memory
CHECK(cudaFree(d_data));
CHECK(cudaFree(d_centroids));
CHECK(cudaFree(d_newcentroids));
CHECK(cudaFree(d_counter));
// free host memory
free(h_data);
free(h_centroids);
free(h_newcentroids);
free(h_counter);
return(0);
}
|
4f91b63a55b15577bd9089cf60963d535498d352.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "parallel.cuh"
using std::cout;
using std::flush;
using std::endl;
__global__ void plus100Kernel(int *input, int* output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < 100)
{
output[i] = input[i] + 100;
}
}
void plus100(int n_block, int n_thread)
{
int *d_input = 0;
int *d_output = 0;
hipMalloc((void**)&d_input, 100 * sizeof(int));
hipMalloc((void**)&d_output, 100 * sizeof(int));
srand(time(NULL));
int* matrice = (int*)malloc(sizeof(int) * 100);
for(int i = 0; i < 100; i++)
{
matrice[i] = rand() % 100;
}
    // Copy to the device
hipMemcpy(d_input, matrice, 100 * sizeof(int), hipMemcpyHostToDevice);
    // Launch the kernel with 100 blocks
hipLaunchKernelGGL(( plus100Kernel), dim3(n_block), dim3(n_thread), 0, 0, d_input, d_output);
    // Wait for the kernel to finish, then copy back to the host
hipDeviceSynchronize();
hipMemcpy(matrice, d_output, 100 * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
{
printf("%d\n", matrice[i]);
}
}
| 4f91b63a55b15577bd9089cf60963d535498d352.cu | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "parallel.cuh"
using std::cout;
using std::flush;
using std::endl;
__global__ void plus100Kernel(int *input, int* output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < 100)
{
output[i] = input[i] + 100;
}
}
void plus100(int n_block, int n_thread)
{
int *d_input = 0;
int *d_output = 0;
cudaMalloc((void**)&d_input, 100 * sizeof(int));
cudaMalloc((void**)&d_output, 100 * sizeof(int));
srand(time(NULL));
int* matrice = (int*)malloc(sizeof(int) * 100);
for(int i = 0; i < 100; i++)
{
matrice[i] = rand() % 100;
}
    // Copy to the device
cudaMemcpy(d_input, matrice, 100 * sizeof(int), cudaMemcpyHostToDevice);
    // Launch the kernel with 100 blocks
plus100Kernel<<<n_block, n_thread>>>(d_input, d_output);
    // Wait for the kernel to finish, then copy back to the host
cudaDeviceSynchronize();
cudaMemcpy(matrice, d_output, 100 * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
{
printf("%d\n", matrice[i]);
}
}
|
1e7b808295d433edd9873284a69f46479594c684.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <fstream>
//#include <iostream>
#include <string.h>
//#include <vector>
#include <stdlib.h>
//#include <unistd.h>
//#include <time.h>
#include <hip/hip_runtime.h>
//#include <mpi.h>
#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word
//define for sha256
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
typedef struct {
uchar data[64];
uint datalen;
uint bitlen[2];
uint state[8];
} SHA256_CTX;
__constant__ uint k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
__constant__ uchar answer[32];
//==============cuda kernel=====================================
__device__ void sha256_transform(SHA256_CTX *ctx, uchar *data){
uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
for (i=0,j=0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
for ( ; i < 64; ++i)
m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
f = ctx->state[5];
g = ctx->state[6];
h = ctx->state[7];
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
ctx->state[5] += f;
ctx->state[6] += g;
ctx->state[7] += h;
}
__device__ void sha256(SHA256_CTX *ctx, uchar *data, uchar *hash, int len){
//init sha256 data structure
ctx->datalen = 0;
ctx->bitlen[0] = 0;
ctx->bitlen[1] = 0;
ctx->state[0] = 0x6a09e667;
ctx->state[1] = 0xbb67ae85;
ctx->state[2] = 0x3c6ef372;
ctx->state[3] = 0xa54ff53a;
ctx->state[4] = 0x510e527f;
ctx->state[5] = 0x9b05688c;
ctx->state[6] = 0x1f83d9ab;
ctx->state[7] = 0x5be0cd19;
//update
uint i;
//uint len = 5; //need to fix!!
for (i=0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
sha256_transform(ctx,ctx->data);
DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],512);
ctx->datalen = 0;
}
}
//final
i = ctx->datalen;
if (ctx->datalen < 56) {
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
sha256_transform(ctx,ctx->data);
memset(ctx->data,0,56);
}
//par here
DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],ctx->datalen * 8);
ctx->data[63] = ctx->bitlen[0];
ctx->data[62] = ctx->bitlen[0] >> 8;
ctx->data[61] = ctx->bitlen[0] >> 16;
ctx->data[60] = ctx->bitlen[0] >> 24;
ctx->data[59] = ctx->bitlen[1];
ctx->data[58] = ctx->bitlen[1] >> 8;
ctx->data[57] = ctx->bitlen[1] >> 16;
ctx->data[56] = ctx->bitlen[1] >> 24;
sha256_transform(ctx,ctx->data);
	//we can parallelize this part
for (i=0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24-i*8)) & 0x000000ff;
hash[i+4] = (ctx->state[1] >> (24-i*8)) & 0x000000ff;
hash[i+8] = (ctx->state[2] >> (24-i*8)) & 0x000000ff;
hash[i+12] = (ctx->state[3] >> (24-i*8)) & 0x000000ff;
hash[i+16] = (ctx->state[4] >> (24-i*8)) & 0x000000ff;
hash[i+20] = (ctx->state[5] >> (24-i*8)) & 0x000000ff;
hash[i+24] = (ctx->state[6] >> (24-i*8)) & 0x000000ff;
hash[i+28] = (ctx->state[7] >> (24-i*8)) & 0x000000ff;
}
}
__device__ void sha256_hash(SHA256_CTX *ctx, uchar *data, uchar *hash, int len, int round){
sha256(ctx, data, hash, len);
while(round > 1){
sha256(ctx,hash,hash,32);
round --;
}
}
__device__ bool my_strcmp(uchar *str_a, uchar *str_b, uint len){
for(int i=0; i<len; i++){
if(str_a[i] != str_b[i])
return false;
}
return true;
}
__global__ void sha256_wrap(uchar *pwarray, uchar *target, int* pwcount, uchar *result){
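	// candidate passwords are packed back-to-back in pwarray; pwcount[idx] is the length of
	// the idx-th candidate, so its start offset is the sum of the preceding lengths. Each
	// candidate is hashed for 10000 rounds and compared against the five target digests.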
int idx = threadIdx.x + blockDim.x * blockIdx.x;
uchar* data = (uchar*)malloc(pwcount[idx]*sizeof(uchar));
SHA256_CTX ctx;// = new SHA256_CTX;
uchar hash[32];
int round = 10000, count = 0;
for(int i=0; i<idx; i++){
count += pwcount[i];
}
memcpy(data,&pwarray[count],pwcount[idx]*sizeof(uchar));
sha256_hash(&ctx,data,hash,pwcount[idx],round);
for (int i=0; i<5;i++){
if(my_strcmp(hash,&target[32*i],32)){
memcpy(result,data,pwcount[0]*sizeof(uchar));
}
}
}
//====================================================================
void print_hash(unsigned char hash[]){
int idx;
for (idx=0; idx < 32; idx++)
printf("%02x",hash[idx]);
printf("\n");
}
extern "C" void hash_pairing(uchar *pwarray, uchar *target, int *pwcount, int num){
uchar *dev_pwarray, *dev_target, *dev_result;
uchar *result = new uchar[32];
int *dev_pwcount;
hipMalloc((void**)&dev_pwarray,strlen((const char*)pwarray));
hipMemcpy((void*)dev_pwarray, pwarray, strlen((const char*)pwarray),hipMemcpyHostToDevice);
hipMalloc((void**)&dev_target,160*sizeof(uchar));
hipMemcpy((void*)dev_target, target, 160*sizeof(uchar),hipMemcpyHostToDevice);
hipMalloc((void**)&dev_result, 32*sizeof(uchar));
// hipMemcpy((void*)dev_result, result, 32*sizeof(uchar), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_pwcount,num*sizeof(int));
hipMemcpy((void*)dev_pwcount,pwcount,num*sizeof(int),hipMemcpyHostToDevice);
dim3 DimBlock(1024,1);
dim3 DimGrid(55,1);
hipLaunchKernelGGL(( sha256_wrap) , dim3(DimGrid), dim3(DimBlock) , 0, 0, dev_pwarray, dev_target, dev_pwcount, dev_result);
hipDeviceSynchronize();
hipMemcpy((void*)result, dev_result, 32*sizeof(uchar), hipMemcpyDeviceToHost);
if(strlen((const char*)result)!=0)
printf("password: %s ", result);
memset(result,0,strlen((const char*) result));
hipDeviceReset();
//hipFree(dev_pwarray); hipFree(dev_target); hipFree(dev_result);
}
| 1e7b808295d433edd9873284a69f46479594c684.cu | #include <stdio.h>
#include <fstream>
//#include <iostream>
#include <string.h>
//#include <vector>
#include <stdlib.h>
//#include <unistd.h>
//#include <time.h>
#include <cuda.h>
//#include <mpi.h>
#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word
//define for sha256
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
typedef struct {
uchar data[64];
uint datalen;
uint bitlen[2];
uint state[8];
} SHA256_CTX;
__constant__ uint k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
__constant__ uchar answer[32];
//==============cuda kernel=====================================
__device__ void sha256_transform(SHA256_CTX *ctx, uchar *data){
uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
for (i=0,j=0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
for ( ; i < 64; ++i)
m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
a = ctx->state[0];
b = ctx->state[1];
c = ctx->state[2];
d = ctx->state[3];
e = ctx->state[4];
f = ctx->state[5];
g = ctx->state[6];
h = ctx->state[7];
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
ctx->state[0] += a;
ctx->state[1] += b;
ctx->state[2] += c;
ctx->state[3] += d;
ctx->state[4] += e;
ctx->state[5] += f;
ctx->state[6] += g;
ctx->state[7] += h;
}
__device__ void sha256(SHA256_CTX *ctx, uchar *data, uchar *hash, int len){
//init sha256 data structure
ctx->datalen = 0;
ctx->bitlen[0] = 0;
ctx->bitlen[1] = 0;
ctx->state[0] = 0x6a09e667;
ctx->state[1] = 0xbb67ae85;
ctx->state[2] = 0x3c6ef372;
ctx->state[3] = 0xa54ff53a;
ctx->state[4] = 0x510e527f;
ctx->state[5] = 0x9b05688c;
ctx->state[6] = 0x1f83d9ab;
ctx->state[7] = 0x5be0cd19;
//update
uint i;
//uint len = 5; //need to fix!!
for (i=0; i < len; ++i) {
ctx->data[ctx->datalen] = data[i];
ctx->datalen++;
if (ctx->datalen == 64) {
sha256_transform(ctx,ctx->data);
DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],512);
ctx->datalen = 0;
}
}
//final
i = ctx->datalen;
if (ctx->datalen < 56) {
ctx->data[i++] = 0x80;
while (i < 56)
ctx->data[i++] = 0x00;
}
else {
ctx->data[i++] = 0x80;
while (i < 64)
ctx->data[i++] = 0x00;
sha256_transform(ctx,ctx->data);
memset(ctx->data,0,56);
}
//par here
DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],ctx->datalen * 8);
ctx->data[63] = ctx->bitlen[0];
ctx->data[62] = ctx->bitlen[0] >> 8;
ctx->data[61] = ctx->bitlen[0] >> 16;
ctx->data[60] = ctx->bitlen[0] >> 24;
ctx->data[59] = ctx->bitlen[1];
ctx->data[58] = ctx->bitlen[1] >> 8;
ctx->data[57] = ctx->bitlen[1] >> 16;
ctx->data[56] = ctx->bitlen[1] >> 24;
sha256_transform(ctx,ctx->data);
	//we can parallelize this part
for (i=0; i < 4; ++i) {
hash[i] = (ctx->state[0] >> (24-i*8)) & 0x000000ff;
hash[i+4] = (ctx->state[1] >> (24-i*8)) & 0x000000ff;
hash[i+8] = (ctx->state[2] >> (24-i*8)) & 0x000000ff;
hash[i+12] = (ctx->state[3] >> (24-i*8)) & 0x000000ff;
hash[i+16] = (ctx->state[4] >> (24-i*8)) & 0x000000ff;
hash[i+20] = (ctx->state[5] >> (24-i*8)) & 0x000000ff;
hash[i+24] = (ctx->state[6] >> (24-i*8)) & 0x000000ff;
hash[i+28] = (ctx->state[7] >> (24-i*8)) & 0x000000ff;
}
}
__device__ void sha256_hash(SHA256_CTX *ctx, uchar *data, uchar *hash, int len, int round){
sha256(ctx, data, hash, len);
while(round > 1){
sha256(ctx,hash,hash,32);
round --;
}
}
__device__ bool my_strcmp(uchar *str_a, uchar *str_b, uint len){
for(int i=0; i<len; i++){
if(str_a[i] != str_b[i])
return false;
}
return true;
}
__global__ void sha256_wrap(uchar *pwarray, uchar *target, int* pwcount, uchar *result){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
uchar* data = (uchar*)malloc(pwcount[idx]*sizeof(uchar));
SHA256_CTX ctx;// = new SHA256_CTX;
uchar hash[32];
int round = 10000, count = 0;
for(int i=0; i<idx; i++){
count += pwcount[i];
}
memcpy(data,&pwarray[count],pwcount[idx]*sizeof(uchar));
sha256_hash(&ctx,data,hash,pwcount[idx],round);
for (int i=0; i<5;i++){
if(my_strcmp(hash,&target[32*i],32)){
memcpy(result,data,pwcount[0]*sizeof(uchar));
}
}
}
//====================================================================
void print_hash(unsigned char hash[]){
int idx;
for (idx=0; idx < 32; idx++)
printf("%02x",hash[idx]);
printf("\n");
}
extern "C" void hash_pairing(uchar *pwarray, uchar *target, int *pwcount, int num){
uchar *dev_pwarray, *dev_target, *dev_result;
uchar *result = new uchar[32];
int *dev_pwcount;
cudaMalloc((void**)&dev_pwarray,strlen((const char*)pwarray));
cudaMemcpy((void*)dev_pwarray, pwarray, strlen((const char*)pwarray),cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_target,160*sizeof(uchar));
cudaMemcpy((void*)dev_target, target, 160*sizeof(uchar),cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_result, 32*sizeof(uchar));
// cudaMemcpy((void*)dev_result, result, 32*sizeof(uchar), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_pwcount,num*sizeof(int));
cudaMemcpy((void*)dev_pwcount,pwcount,num*sizeof(int),cudaMemcpyHostToDevice);
dim3 DimBlock(1024,1);
dim3 DimGrid(55,1);
sha256_wrap <<< DimGrid, DimBlock >>> (dev_pwarray, dev_target, dev_pwcount, dev_result);
cudaDeviceSynchronize();
cudaMemcpy((void*)result, dev_result, 32*sizeof(uchar), cudaMemcpyDeviceToHost);
if(strlen((const char*)result)!=0)
printf("password: %s ", result);
memset(result,0,strlen((const char*) result));
cudaDeviceReset();
//cudaFree(dev_pwarray); cudaFree(dev_target); cudaFree(dev_result);
}
|
4f9df2e63f58bcd0a77734e6b2f45c79b768fd65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <ATen/ATen.h>
namespace playground {
// --------------------------------------------------------------------------------
void test_0() {
auto output = at::arange(10, at::CUDA(at::kFloat));
std::cout << "output.device: " << output.device() << std::endl;
std::cout << "output: " << output << std::endl;
}
// --------------------------------------------------------------------------------
template <typename scalar_t>
__device__ __forceinline__ static scalar_t bilinear_filter(scalar_t x) {
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return static_cast<scalar_t>(1.0) - x;
}
return static_cast<scalar_t>(0.0);
}
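// _compute_weights fills wt_ptr with normalized triangular (bilinear) weights for output index i
// and reports the contributing input window via xmin (first input index) and xmax (window length).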
template <typename scalar_t>
__device__ __forceinline__ static void _compute_weights(
const int64_t i,
const int64_t input_size,
const scalar_t scale,
const scalar_t support,
scalar_t * wt_ptr,
int64_t interp_size,
int64_t & xmin,
int64_t & xmax) {
scalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;
scalar_t center = scale * (i + 0.5);
xmin = max(static_cast<int64_t>(center - support + 0.5), static_cast<int64_t>(0));
xmax = min(static_cast<int64_t>(center + support + 0.5), input_size) - xmin;
scalar_t total_w = 0.0;
int64_t j = 0;
for (j = 0; j < xmax; j++) {
scalar_t w = bilinear_filter((j + xmin - center + 0.5) * invscale);
wt_ptr[j] = static_cast<scalar_t>(w);
total_w += w;
}
for (j = 0; j < xmax; j++) {
if (total_w != 0.0) {
wt_ptr[j] /= total_w;
}
}
for (; j < interp_size; j++) {
wt_ptr[j] = static_cast<scalar_t>(0.0);
}
}
template<typename scalar_t>
__global__ void test_1_kernel(
const int n,
at::PackedTensorAccessor64<scalar_t, 1> odata,
int64_t input_size,
scalar_t scale,
scalar_t support
) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index > n) return;
const int interp_size = (int)ceilf(support) * 2 + 1;
extern __shared__ int smem[];
scalar_t * w_ptr = reinterpret_cast<scalar_t*>(smem);
int64_t xmin, xsize;
_compute_weights(0, input_size, scale, support, w_ptr, interp_size, xmin, xsize);
odata[index] = w_ptr[index];
}
void test_1() {
using scalar_t = float;
const int64_t input_size = 64;
const int64_t output_size = 10;
const scalar_t scale = input_size * 1.0 / output_size;
int interp_size = 2;
const scalar_t support = interp_size * 0.5 * scale;
interp_size = (int)ceilf(support) * 2 + 1;
auto output = at::empty(interp_size, at::CUDA(at::kFloat));
auto odata = output.packed_accessor64<scalar_t, 1>();
size_t shmem_size = (interp_size) * sizeof(scalar_t);
hipLaunchKernelGGL(( test_1_kernel<scalar_t>)
, dim3(1), dim3(256), shmem_size, 0,
interp_size,
odata,
input_size,
scale,
support
);
auto output_cpu = output.cpu();
float * o_ptr = (float *) output_cpu.data_ptr();
std::cout << "output: " << std::endl;
for (int i=0; i<interp_size; i++) {
std::cout << o_ptr[i] << " ";
}
std::cout << std::endl;
}
// --------------------------------------------------------------------------------
template<typename scalar_t>
__global__ void test_2_kernel(
const int n,
at::PackedTensorAccessor64<scalar_t, 1> idata,
at::PackedTensorAccessor64<scalar_t, 1> odata,
scalar_t scale,
scalar_t support
) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index > n) return;
const int interp_size = (int)ceilf(support) * 2 + 1;
const int input_size = idata.size(0);
extern __shared__ int smem[];
scalar_t * w_ptr = reinterpret_cast<scalar_t*>(smem) + threadIdx.x * interp_size;
int64_t xmin, xsize;
_compute_weights(index, input_size, scale, support, w_ptr, interp_size, xmin, xsize);
scalar_t t = idata[xmin];
scalar_t wts = w_ptr[0];
scalar_t output = t * wts;
int64_t j = 1 ;
for (; j<xsize; j++) {
wts = w_ptr[j];
t = idata[xmin + j];
output += t * wts;
}
odata[index] = output;
}
void test_2() {
using scalar_t = float;
const int64_t input_size = 64;
const int64_t output_size = 10;
const scalar_t scale = input_size * 1.0 / output_size;
int interp_size = 2;
const scalar_t support = interp_size * 0.5 * scale;
interp_size = (int)ceilf(support) * 2 + 1;
auto input = at::arange(input_size, at::CUDA(at::kFloat));
auto idata = input.packed_accessor64<scalar_t, 1>();
auto output = at::empty(output_size, at::CUDA(at::kFloat));
auto odata = output.packed_accessor64<scalar_t, 1>();
size_t n_threads = 256;
size_t shmem_size = n_threads * (interp_size) * sizeof(scalar_t);
hipLaunchKernelGGL(( test_2_kernel<scalar_t>)
, dim3(1), dim3(n_threads), shmem_size, 0,
output_size,
idata,
odata,
scale,
support
);
auto output_cpu = output.cpu();
float * o_ptr = (float *) output_cpu.data_ptr();
std::cout << "output: " << std::endl;
for (int i=0; i<output_size; i++) {
std::cout << o_ptr[i] << " ";
}
std::cout << std::endl;
}
// --------------------------------------------------------------------------------
template<typename scalar_t>
__global__ void test_3_kernel(
const int n,
at::PackedTensorAccessor64<scalar_t, 1> idata,
at::PackedTensorAccessor64<scalar_t, 1> odata,
scalar_t scale,
scalar_t support
) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index > n) return;
const int interp_size = (int)ceilf(support) * 2 + 1;
const int input_size = idata.size(0);
// static instead of dynamic shared memory
// max supported scale is 127
scalar_t weights[256];
int64_t xmin, xsize;
_compute_weights(index, input_size, scale, support, weights, interp_size, xmin, xsize);
scalar_t t = idata[xmin];
scalar_t wts = weights[0];
scalar_t output = t * wts;
int64_t j = 1 ;
for (; j<xsize; j++) {
wts = weights[j];
t = idata[xmin + j];
output += t * wts;
}
odata[index] = output;
}
void test_3() {
using scalar_t = float;
const int64_t input_size = 64;
const int64_t output_size = 10;
const scalar_t scale = input_size * 1.0 / output_size;
int interp_size = 2;
const scalar_t support = interp_size * 0.5 * scale;
interp_size = (int)ceilf(support) * 2 + 1;
auto input = at::arange(input_size, at::CUDA(at::kFloat));
auto idata = input.packed_accessor64<scalar_t, 1>();
auto output = at::empty(output_size, at::CUDA(at::kFloat));
auto odata = output.packed_accessor64<scalar_t, 1>();
size_t n_threads = 256;
hipLaunchKernelGGL(( test_3_kernel<scalar_t>)
, dim3(1), dim3(n_threads), 0, 0,
output_size,
idata,
odata,
scale,
support
);
auto output_cpu = output.cpu();
float * o_ptr = (float *) output_cpu.data_ptr();
std::cout << "output: " << std::endl;
for (int i=0; i<output_size; i++) {
std::cout << o_ptr[i] << " ";
}
std::cout << std::endl;
}
// --------------------------------------------------------------------------------
} | 4f9df2e63f58bcd0a77734e6b2f45c79b768fd65.cu | #include <iostream>
#include <ATen/ATen.h>
namespace playground {
// --------------------------------------------------------------------------------
void test_0() {
auto output = at::arange(10, at::CUDA(at::kFloat));
std::cout << "output.device: " << output.device() << std::endl;
std::cout << "output: " << output << std::endl;
}
// --------------------------------------------------------------------------------
template <typename scalar_t>
__device__ __forceinline__ static scalar_t bilinear_filter(scalar_t x) {
if (x < 0.0) {
x = -x;
}
if (x < 1.0) {
return static_cast<scalar_t>(1.0) - x;
}
return static_cast<scalar_t>(0.0);
}
template <typename scalar_t>
__device__ __forceinline__ static void _compute_weights(
const int64_t i,
const int64_t input_size,
const scalar_t scale,
const scalar_t support,
scalar_t * wt_ptr,
int64_t interp_size,
int64_t & xmin,
int64_t & xmax) {
scalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;
scalar_t center = scale * (i + 0.5);
xmin = max(static_cast<int64_t>(center - support + 0.5), static_cast<int64_t>(0));
xmax = min(static_cast<int64_t>(center + support + 0.5), input_size) - xmin;
scalar_t total_w = 0.0;
int64_t j = 0;
for (j = 0; j < xmax; j++) {
scalar_t w = bilinear_filter((j + xmin - center + 0.5) * invscale);
wt_ptr[j] = static_cast<scalar_t>(w);
total_w += w;
}
for (j = 0; j < xmax; j++) {
if (total_w != 0.0) {
wt_ptr[j] /= total_w;
}
}
for (; j < interp_size; j++) {
wt_ptr[j] = static_cast<scalar_t>(0.0);
}
}
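// A minimal host-side reference of the same weight computation (a sketch added
// for illustration; the helper name compute_weights_ref is an assumption and is
// not part of the original file). It mirrors _compute_weights for float so that
// kernel results can be spot-checked on the CPU.
inline void compute_weights_ref(
    const int64_t i,
    const int64_t input_size,
    const float scale,
    const float support,
    float* wt_ptr,
    const int64_t interp_size,
    int64_t& xmin,
    int64_t& xmax)
{
  const float invscale = (scale >= 1.0f) ? 1.0f / scale : 1.0f;
  const float center = scale * (i + 0.5f);
  const int64_t lo = static_cast<int64_t>(center - support + 0.5f);
  const int64_t hi = static_cast<int64_t>(center + support + 0.5f);
  xmin = (lo > 0) ? lo : 0;
  xmax = ((hi < input_size) ? hi : input_size) - xmin;
  float total_w = 0.0f;
  int64_t j = 0;
  for (; j < xmax; j++) {
    // bilinear_filter, inlined for the host
    float x = (j + xmin - center + 0.5f) * invscale;
    x = (x < 0.0f) ? -x : x;
    const float w = (x < 1.0f) ? 1.0f - x : 0.0f;
    wt_ptr[j] = w;
    total_w += w;
  }
  for (j = 0; j < xmax; j++) {
    if (total_w != 0.0f) {
      wt_ptr[j] /= total_w;
    }
  }
  for (; j < interp_size; j++) {
    wt_ptr[j] = 0.0f;
  }
}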
template<typename scalar_t>
__global__ void test_1_kernel(
const int n,
at::PackedTensorAccessor64<scalar_t, 1> odata,
int64_t input_size,
scalar_t scale,
scalar_t support
) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index >= n) return;
const int interp_size = (int)ceilf(support) * 2 + 1;
extern __shared__ int smem[];
scalar_t * w_ptr = reinterpret_cast<scalar_t*>(smem);
int64_t xmin, xsize;
_compute_weights(0, input_size, scale, support, w_ptr, interp_size, xmin, xsize);
odata[index] = w_ptr[index];
}
void test_1() {
using scalar_t = float;
const int64_t input_size = 64;
const int64_t output_size = 10;
const scalar_t scale = input_size * 1.0 / output_size;
int interp_size = 2;
const scalar_t support = interp_size * 0.5 * scale;
interp_size = (int)ceilf(support) * 2 + 1;
auto output = at::empty(interp_size, at::CUDA(at::kFloat));
auto odata = output.packed_accessor64<scalar_t, 1>();
size_t shmem_size = (interp_size) * sizeof(scalar_t);
test_1_kernel<scalar_t>
<<<1, 256, shmem_size>>>(
interp_size,
odata,
input_size,
scale,
support
);
auto output_cpu = output.cpu();
float * o_ptr = (float *) output_cpu.data_ptr();
std::cout << "output: " << std::endl;
for (int i=0; i<interp_size; i++) {
std::cout << o_ptr[i] << " ";
}
std::cout << std::endl;
}
// --------------------------------------------------------------------------------
template<typename scalar_t>
__global__ void test_2_kernel(
const int n,
at::PackedTensorAccessor64<scalar_t, 1> idata,
at::PackedTensorAccessor64<scalar_t, 1> odata,
scalar_t scale,
scalar_t support
) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index >= n) return;
const int interp_size = (int)ceilf(support) * 2 + 1;
const int input_size = idata.size(0);
extern __shared__ int smem[];
scalar_t * w_ptr = reinterpret_cast<scalar_t*>(smem) + threadIdx.x * interp_size;
int64_t xmin, xsize;
_compute_weights(index, input_size, scale, support, w_ptr, interp_size, xmin, xsize);
scalar_t t = idata[xmin];
scalar_t wts = w_ptr[0];
scalar_t output = t * wts;
int64_t j = 1 ;
for (; j<xsize; j++) {
wts = w_ptr[j];
t = idata[xmin + j];
output += t * wts;
}
odata[index] = output;
}
void test_2() {
using scalar_t = float;
const int64_t input_size = 64;
const int64_t output_size = 10;
const scalar_t scale = input_size * 1.0 / output_size;
int interp_size = 2;
const scalar_t support = interp_size * 0.5 * scale;
interp_size = (int)ceilf(support) * 2 + 1;
auto input = at::arange(input_size, at::CUDA(at::kFloat));
auto idata = input.packed_accessor64<scalar_t, 1>();
auto output = at::empty(output_size, at::CUDA(at::kFloat));
auto odata = output.packed_accessor64<scalar_t, 1>();
size_t n_threads = 256;
size_t shmem_size = n_threads * (interp_size) * sizeof(scalar_t);
test_2_kernel<scalar_t>
<<<1, n_threads, shmem_size>>>(
output_size,
idata,
odata,
scale,
support
);
auto output_cpu = output.cpu();
float * o_ptr = (float *) output_cpu.data_ptr();
std::cout << "output: " << std::endl;
for (int i=0; i<output_size; i++) {
std::cout << o_ptr[i] << " ";
}
std::cout << std::endl;
}
// --------------------------------------------------------------------------------
template<typename scalar_t>
__global__ void test_3_kernel(
const int n,
at::PackedTensorAccessor64<scalar_t, 1> idata,
at::PackedTensorAccessor64<scalar_t, 1> odata,
scalar_t scale,
scalar_t support
) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index >= n) return;
const int interp_size = (int)ceilf(support) * 2 + 1;
const int input_size = idata.size(0);
  // fixed-size per-thread array instead of dynamic shared memory
  // max supported scale is 127
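  // (for the bilinear filter support == scale here, so interp_size = 2 * ceil(scale) + 1;
  //  scale = 127 gives at most 255 weights, which still fits in the 256-entry array below)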
scalar_t weights[256];
int64_t xmin, xsize;
_compute_weights(index, input_size, scale, support, weights, interp_size, xmin, xsize);
scalar_t t = idata[xmin];
scalar_t wts = weights[0];
scalar_t output = t * wts;
int64_t j = 1 ;
for (; j<xsize; j++) {
wts = weights[j];
t = idata[xmin + j];
output += t * wts;
}
odata[index] = output;
}
void test_3() {
using scalar_t = float;
const int64_t input_size = 64;
const int64_t output_size = 10;
const scalar_t scale = input_size * 1.0 / output_size;
int interp_size = 2;
const scalar_t support = interp_size * 0.5 * scale;
interp_size = (int)ceilf(support) * 2 + 1;
auto input = at::arange(input_size, at::CUDA(at::kFloat));
auto idata = input.packed_accessor64<scalar_t, 1>();
auto output = at::empty(output_size, at::CUDA(at::kFloat));
auto odata = output.packed_accessor64<scalar_t, 1>();
size_t n_threads = 256;
test_3_kernel<scalar_t>
<<<1, n_threads>>>(
output_size,
idata,
odata,
scale,
support
);
auto output_cpu = output.cpu();
float * o_ptr = (float *) output_cpu.data_ptr();
std::cout << "output: " << std::endl;
for (int i=0; i<output_size; i++) {
std::cout << o_ptr[i] << " ";
}
std::cout << std::endl;
}
// --------------------------------------------------------------------------------
} |
4d8e35fc3de587a56a94fc78320d93fa7f3ef97b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMath.cu"
#else
THC_API void
THCTensor_(fill)(THCState* state, THCTensor *self_, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<real>(
state, self_, TensorFillOp<real>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(real) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<real>(
state, self_,
TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(zeros)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(ones)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(reshape)(THCState *state, THCTensor *r_, THCTensor *t, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, t));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(copy)(state, r_, t);
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = THCTensor_(nDimension)(state, first);
int second_dims = THCTensor_(nDimension)(state, second);
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j, cohortMax;
int64_t offset;
bool hasEmptyInput = false;
THCTensor *notEmptyTensor = NULL;
// Even in the case where dimension is negative (i.e. when we want
// to cat along the last dimension), this logic still works, as the
// loop below will overwrite the value
int nDims = dimension + 1;
// cat_dimension is the actual dimension we cat along
int cat_dimension = dimension;
for (i = 0; i < numInputs; i++)
{
int inputDim = THCTensor_(nDimension)(state, inputs[i]);
hasEmptyInput |= !inputDim;
if (inputDim > 0) {
nDims = inputDim;
notEmptyTensor = inputs[i];
}
}
// If all inputs are empty tensors, return an empty tensor
if (notEmptyTensor == NULL) {
return;
}
// In the event that the user specified -1 as the concat dimension, then
// we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the
  // value due to 0-based indexing). If the nDims is 0 (i.e. we are catting all
// empty tensors), then we set cat_dimension to be 0
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? (nDims - 1) : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (THCTensor_(nDimension)(state, tensor) == 0) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notEmptyTensor, tensor, cat_dimension);
cat_dim_size += THCTensor_(size)(state, tensor, cat_dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notEmptyTensor, dim);
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
THLongStorage_data(size)[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, NULL);
THLongStorage_free(size);
  // We parallelize the copy if all of the following conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasEmptyInput &&
THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, (const _THCTensor **)inputs, numInputs) &&
THCTensor_all32BitIndexable(state, (const _THCTensor **)inputs, numInputs) &&
THCTensor_allSameDevice(state, (const _THCTensor **)inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
real *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
CatArrInputTensor<real, unsigned int> *d_inputs;
THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
THCStream* stream = THCState_getStream(state);
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
hipLaunchKernelGGL(( CatArrayBatchedCopy<real, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream->stream, data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize);
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j])
? THCTensor_(size)(state, inputs[i+j], cat_dimension)
: 1;
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(hipMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<real, unsigned int>),
hipMemcpyHostToDevice,
stream->stream));
THCudaHostRecord(state, stackInputs);
THCudaHostFree(state, stackInputs);
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
      // Get a grid where the x dim fills half the GPU and the y dim is the number of tensors.
      // This makes concatenating two tensors fill the entire grid, but prevents
      // many threads from needlessly loading metadata when their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(hipGetLastError());
}
THCudaCheck(THCudaFree(state, d_inputs));
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
// No reason to copy when input is empty
if (!THCTensor_(nDimension)(state, inputs[j])) continue;
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j])
? THCTensor_(size)(state, inputs[j], cat_dimension)
: 1;
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int64_t N = THCTensor_(nElement)(state, self);
THCudaLongTensor_resize2d(state, tensor, N, num_dim);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim, num_dim);
#if TORCH_HIP_VERSION >= 7000
hipStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<real>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, self->size[dim])
);
div *= self->size[dim];
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(hipGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THCTensor_(stride)(state, src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage,
self_->storageOffset, sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((src_->nDimension == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, a);
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_ // if r_ is contiguous we can direct work on it
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LinspaceOp<real> linspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, linspace_method);
if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a));
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LogspaceOp<real> logspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, logspace_method);
if (!THCTensor_(isContiguous)(state, r_)) {
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(hipGetLastError());
}
#endif
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(hipGetLastError());
}
void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(hipGetLastError());
}
#endif
| 4d8e35fc3de587a56a94fc78320d93fa7f3ef97b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMath.cu"
#else
THC_API void
THCTensor_(fill)(THCState* state, THCTensor *self_, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<real>(
state, self_, TensorFillOp<real>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(real) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<real>(
state, self_,
TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(zeros)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(ones)(THCState *state, THCTensor *r_, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
THC_API void
THCTensor_(reshape)(THCState *state, THCTensor *r_, THCTensor *t, THLongStorage *size)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, t));
THCTensor_(resize)(state, r_, size, NULL);
THCTensor_(copy)(state, r_, t);
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = THCTensor_(nDimension)(state, first);
int second_dims = THCTensor_(nDimension)(state, second);
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j, cohortMax;
int64_t offset;
bool hasEmptyInput = false;
THCTensor *notEmptyTensor = NULL;
// Even in the case where dimension is negative (i.e. when we want
// to cat along the last dimension), this logic still works, as the
// loop below will overwrite the value
int nDims = dimension + 1;
// cat_dimension is the actual dimension we cat along
int cat_dimension = dimension;
for (i = 0; i < numInputs; i++)
{
int inputDim = THCTensor_(nDimension)(state, inputs[i]);
hasEmptyInput |= !inputDim;
if (inputDim > 0) {
nDims = inputDim;
notEmptyTensor = inputs[i];
}
}
// If all inputs are empty tensors, return an empty tensor
if (notEmptyTensor == NULL) {
return;
}
// In the event that the user specified -1 as the concat dimension, then
// we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the
  // value due to 0-based indexing). If the nDims is 0 (i.e. we are catting all
// empty tensors), then we set cat_dimension to be 0
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? (nDims - 1) : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (THCTensor_(nDimension)(state, tensor) == 0) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notEmptyTensor, tensor, cat_dimension);
cat_dim_size += THCTensor_(size)(state, tensor, cat_dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notEmptyTensor, dim);
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
THLongStorage_data(size)[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, NULL);
THLongStorage_free(size);
  // We parallelize the copy if all of the following conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasEmptyInput &&
THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, (const _THCTensor **)inputs, numInputs) &&
THCTensor_all32BitIndexable(state, (const _THCTensor **)inputs, numInputs) &&
THCTensor_allSameDevice(state, (const _THCTensor **)inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
real *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
CatArrInputTensor<real, unsigned int> *d_inputs;
THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
THCStream* stream = THCState_getStream(state);
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<real, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream->stream>>>(data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize);
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j])
? THCTensor_(size)(state, inputs[i+j], cat_dimension)
: 1;
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(cudaMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<real, unsigned int>),
cudaMemcpyHostToDevice,
stream->stream));
THCudaHostRecord(state, stackInputs);
THCudaHostFree(state, stackInputs);
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
      // Get a grid where the x dim fills half the GPU and the y dim is the number of tensors.
      // This makes concatenating two tensors fill the entire grid, but prevents
      // many threads from needlessly loading metadata when their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(cudaGetLastError());
}
THCudaCheck(THCudaFree(state, d_inputs));
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
// No reason to copy when input is empty
if (!THCTensor_(nDimension)(state, inputs[j])) continue;
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j])
? THCTensor_(size)(state, inputs[j], cat_dimension)
: 1;
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int64_t N = THCTensor_(nElement)(state, self);
THCudaLongTensor_resize2d(state, tensor, N, num_dim);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim, num_dim);
#if CUDA_VERSION >= 7000
cudaStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<real>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, self->size[dim])
);
div *= self->size[dim];
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THCTensor_(stride)(state, src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage,
self_->storageOffset, sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((src_->nDimension == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, a);
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_ // if r_ is contiguous we can direct work on it
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LinspaceOp<real> linspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, linspace_method);
if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a));
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LogspaceOp<real> logspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, logspace_method);
if (!THCTensor_(isContiguous)(state, r_)) {
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(cudaGetLastError());
}
#endif
|
191ad76b9d2783aed5a9bfe85503e63f0d18013e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gpuinflate.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <hipcub/hipcub.hpp>
namespace cudf {
namespace io {
constexpr int32_t batch_size = (1 << 5);
constexpr int32_t batch_count = (1 << 2);
constexpr int32_t prefetch_size = (1 << 9); // 512B, in 32B chunks
constexpr bool log_cyclecount = false;
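// Busy-waits for roughly `cycles` device clock ticks; the wrap-around branch below
// keeps the elapsed count valid when the 32-bit clock counter overflows.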
void __device__ busy_wait(size_t cycles)
{
clock_t start = clock();
for (;;) {
clock_t const now = clock();
clock_t const elapsed = now > start ? now - start : now + (0xffffffff - start);
if (elapsed >= cycles) return;
}
}
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
*/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
*/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[batch_count]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[batch_count * batch_size]; ///< LZ77 batch data
uint8_t buf[prefetch_size]; ///< Prefetch buffer
};
/**
* @brief snappy decompression state
*/
struct unsnap_state_s {
const uint8_t* base; ///< base ptr of compressed stream
const uint8_t* end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< remaining bytes to decompress
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
device_span<uint8_t const> src; ///< input for current block
device_span<uint8_t> dst; ///< output for current block
};
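// Accessor into the circular prefetch buffer: prefetch_size is a power of two,
// so masking the position with (prefetch_size - 1) wraps it around cheaply.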
inline __device__ volatile uint8_t& byte_access(unsnap_state_s* s, uint32_t pos)
{
return s->q.buf[pos & (prefetch_size - 1)];
}
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
*/
__device__ void snappy_prefetch_bytestream(unsnap_state_s* s, int t)
{
const uint8_t* base = s->base;
auto end = (uint32_t)(s->end - base);
auto align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
  // Start by prefetching up to the next 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
__syncwarp();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, prefetch_size - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
busy_wait(20);
}
}
blen = shuffle(blen);
if (t < blen) { byte_access(s, pos + t) = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
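// A minimal host-side sketch (added for illustration; the name verify_len3lut_host
// is an assumption and not part of the original file). It regenerates the entries
// with the loop from the comment above and checks the documented invariant that
// the upper nibble equals 8 + popcount(lower nibble), without touching the
// device-only __constant__ table.
inline bool verify_len3lut_host()
{
  for (uint32_t k = 0; k < 1024; k++) {
    uint32_t v = 0, b = k, n = 0;
    for (uint32_t i = 0; i < 4; i++) {
      v |= (b & 1) << i;
      n += (b & 1) + 2;
      b >>= (b & 1) + 2;
    }
    uint8_t const entry = static_cast<uint8_t>(v | (n << 4));
    if ((entry >> 4) != 8u + static_cast<uint32_t>(__builtin_popcount(entry & 0xf))) return false;
    if (k == 0 && entry != 0x80) return false;  // four 2-byte codes -> mask 0, 8 bytes total
    if (k == 1 && entry != 0x91) return false;  // first code 3 bytes -> mask 1, 9 bytes total
  }
  return true;
}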
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
*/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
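// For example, if the low bits of v0/v1/v2 are all zero (every tag marks a 2-byte
// code), each of the eight lookups yields 0x80, so the result is 0 and the 32
// symbols span 64 bytes of input.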
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
* minus 2, given two input masks each containing bit0 or bit1 of the corresponding
* code length minus 2 for up to 32 bytes
*/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
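// Worked examples: v0 = 0, v1 = 0 means every (length - 2) is 0, i.e. 32 two-byte
// codes, and the function returns 0; v0 = 1, v1 = 0 means only the first code is
// 3 bytes long (length - 2 = 1), and the function returns 1.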
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
*/
__device__ void snappy_decode_symbols(unsnap_state_s* s, uint32_t t)
{
uint32_t cur = 0;
auto end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s* b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
b = &s->q.batch[batch * batch_size];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = shuffle(cur);
cur_t = cur + t;
b0 = byte_access(s, cur_t);
v0 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 32);
v1 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 64);
v2 = ballot((b0 == 4) || (b0 & 2));
len3_mask = shuffle((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = ballot(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s*>(shuffle(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = shuffle((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = shuffle(blen, batch_len - 1);
cur = shuffle(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < batch_size - 2 && shuffle(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = byte_access(s, cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = ballot(clen & 1);
v1 = ballot((clen >> 1) & 1);
len3_mask = shuffle((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) +
__popc((len3_mask & 0x55555555) & mask_t);
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= batch_size);
batch_add = __ffs(ballot(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = shuffle(blen, batch_add - 1);
cur = shuffle(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < batch_size - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < batch_size) {
uint32_t blen, offset;
uint8_t b0 = byte_access(s, cur);
if (b0 & 3) {
uint8_t b1 = byte_access(s, cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (byte_access(s, cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (byte_access(s, cur + 3) << 16) | (byte_access(s, cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = byte_access(s, cur + 1);
if (num_bytes > 1) {
blen |= byte_access(s, cur + 2) << 8;
if (num_bytes > 2) {
blen |= byte_access(s, cur + 3) << 16;
if (num_bytes > 3) { blen |= byte_access(s, cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (batch_count - 1);
}
}
batch_len = shuffle(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) {
busy_wait(20);
}
}
if (batch_len != batch_size) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
* @param temp_storage temporary storage used by the algorithm
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
 * would result in out-of-bounds accesses)
*/
template <typename Storage>
__device__ void snappy_process_symbols(unsnap_state_s* s, int t, Storage& temp_storage)
{
auto const literal_base = s->base;
auto out = s->dst.data();
int batch = 0;
do {
volatile unsnap_batch_s* b = &s->q.batch[batch * batch_size];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) {
busy_wait(20);
}
} else {
batch_len = 0;
}
batch_len = shuffle(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (shuffle(min((uint32_t)dist_t, (uint32_t)shuffle_xor(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = ballot((uint32_t)dist_t < bofs);
uint32_t start_mask =
hipcub::WarpReduce<uint32_t>(temp_storage).Sum((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
start_mask = shuffle(start_mask);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - shuffle(bofs - blen_t, it);
int32_t dist = shuffle(dist_t, it);
if (it < n) {
const uint8_t* src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += shuffle(bofs, n - 1);
blen_t = shuffle(blen_t, (n + t) & 0x1f);
dist_t = shuffle(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = shuffle(blen_t, i);
int32_t dist = shuffle(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? shuffle(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = shuffle(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
const uint8_t* src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
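      // A positive offset is a backward copy; the pos % dist wrap keeps every lane reading
      // from the dist bytes already written before 'out', so overlapping copies (e.g.
      // dist == 1, which replicates a single byte) match the sequential semantics.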
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
const uint8_t* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
const uint8_t* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
__syncwarp();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (batch_count - 1);
} while (true);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
unsnap_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses)
{
__shared__ __align__(16) unsnap_state_s state_g;
__shared__ hipcub::WarpReduce<uint32_t>::TempStorage temp_storage;
int t = threadIdx.x;
unsnap_state_s* s = &state_g;
int strm_id = blockIdx.x;
if (t < batch_count) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
s->src = inputs[strm_id];
s->dst = outputs[strm_id];
auto cur = s->src.begin();
auto const end = s->src.end();
s->error = 0;
if (log_cyclecount) { s->tstart = clock(); }
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
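      // Example: a preamble of 0xE8 0x07 decodes to (0xE8 & 0x7f) | (0x07 << 7) = 1000
      // uncompressed bytes; the nested checks above unroll this little-endian base-128
      // varint for up to five bytes (the final c < 0x8 test keeps the result in 32 bits).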
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->dst.size())) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f, temp_storage);
}
__syncthreads();
}
if (!t) {
statuses[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
statuses[strm_id].status = s->error;
if (log_cyclecount) {
statuses[strm_id].reserved = clock() - s->tstart;
} else {
statuses[strm_id].reserved = 0;
}
}
}
void gpu_unsnap(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(inputs.size(), 1); // TODO: Check max grid dimensions vs max expected count
hipLaunchKernelGGL(( unsnap_kernel<128>), dim3(dim_grid), dim3(dim_block), 0, stream.value(), inputs, outputs, statuses);
}
} // namespace io
} // namespace cudf
| 191ad76b9d2783aed5a9bfe85503e63f0d18013e.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gpuinflate.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace io {
constexpr int32_t batch_size = (1 << 5);
constexpr int32_t batch_count = (1 << 2);
constexpr int32_t prefetch_size = (1 << 9); // 512B, in 32B chunks
constexpr bool log_cyclecount = false;
void __device__ busy_wait(size_t cycles)
{
clock_t start = clock();
for (;;) {
clock_t const now = clock();
clock_t const elapsed = now > start ? now - start : now + (0xffffffff - start);
if (elapsed >= cycles) return;
}
}
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
*/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
*/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[batch_count]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[batch_count * batch_size]; ///< LZ77 batch data
uint8_t buf[prefetch_size]; ///< Prefetch buffer
};
/**
* @brief snappy decompression state
*/
struct unsnap_state_s {
const uint8_t* base; ///< base ptr of compressed stream
const uint8_t* end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< remaining bytes to decompress
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
device_span<uint8_t const> src; ///< input for current block
device_span<uint8_t> dst; ///< output for current block
};
inline __device__ volatile uint8_t& byte_access(unsnap_state_s* s, uint32_t pos)
{
return s->q.buf[pos & (prefetch_size - 1)];
}
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
*/
__device__ void snappy_prefetch_bytestream(unsnap_state_s* s, int t)
{
const uint8_t* base = s->base;
auto end = (uint32_t)(s->end - base);
auto align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
// Start by prefetching up to the next a 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
__syncwarp();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, prefetch_size - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
busy_wait(20);
}
}
blen = shuffle(blen);
if (t < blen) { byte_access(s, pos + t) = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
*/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
* minus 2, given two input masks each containing bit0 or bit1 of the corresponding
* code length minus 2 for up to 32 bytes
*/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
*/
__device__ void snappy_decode_symbols(unsnap_state_s* s, uint32_t t)
{
uint32_t cur = 0;
auto end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s* b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
b = &s->q.batch[batch * batch_size];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = shuffle(cur);
cur_t = cur + t;
b0 = byte_access(s, cur_t);
v0 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 32);
v1 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 64);
v2 = ballot((b0 == 4) || (b0 & 2));
len3_mask = shuffle((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = ballot(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s*>(shuffle(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = shuffle((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = shuffle(blen, batch_len - 1);
cur = shuffle(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < batch_size - 2 && shuffle(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = byte_access(s, cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = ballot(clen & 1);
v1 = ballot((clen >> 1) & 1);
len3_mask = shuffle((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) +
__popc((len3_mask & 0x55555555) & mask_t);
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= batch_size);
batch_add = __ffs(ballot(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = shuffle(blen, batch_add - 1);
cur = shuffle(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < batch_size - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < batch_size) {
uint32_t blen, offset;
uint8_t b0 = byte_access(s, cur);
if (b0 & 3) {
uint8_t b1 = byte_access(s, cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (byte_access(s, cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (byte_access(s, cur + 3) << 16) | (byte_access(s, cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = byte_access(s, cur + 1);
if (num_bytes > 1) {
blen |= byte_access(s, cur + 2) << 8;
if (num_bytes > 2) {
blen |= byte_access(s, cur + 3) << 16;
if (num_bytes > 3) { blen |= byte_access(s, cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (batch_count - 1);
}
}
batch_len = shuffle(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) {
busy_wait(20);
}
}
if (batch_len != batch_size) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
* @param temp_storage temporary storage used by the algorithm
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
 * would result in out-of-bounds accesses)
*/
template <typename Storage>
__device__ void snappy_process_symbols(unsnap_state_s* s, int t, Storage& temp_storage)
{
auto const literal_base = s->base;
auto out = s->dst.data();
int batch = 0;
do {
volatile unsnap_batch_s* b = &s->q.batch[batch * batch_size];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) {
busy_wait(20);
}
} else {
batch_len = 0;
}
batch_len = shuffle(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (shuffle(min((uint32_t)dist_t, (uint32_t)shuffle_xor(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = ballot((uint32_t)dist_t < bofs);
uint32_t start_mask =
cub::WarpReduce<uint32_t>(temp_storage).Sum((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
start_mask = shuffle(start_mask);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - shuffle(bofs - blen_t, it);
int32_t dist = shuffle(dist_t, it);
if (it < n) {
const uint8_t* src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += shuffle(bofs, n - 1);
blen_t = shuffle(blen_t, (n + t) & 0x1f);
dist_t = shuffle(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = shuffle(blen_t, i);
int32_t dist = shuffle(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? shuffle(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = shuffle(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
const uint8_t* src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
const uint8_t* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
const uint8_t* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
__syncwarp();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (batch_count - 1);
} while (true);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
unsnap_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses)
{
__shared__ __align__(16) unsnap_state_s state_g;
__shared__ cub::WarpReduce<uint32_t>::TempStorage temp_storage;
int t = threadIdx.x;
unsnap_state_s* s = &state_g;
int strm_id = blockIdx.x;
if (t < batch_count) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
s->src = inputs[strm_id];
s->dst = outputs[strm_id];
auto cur = s->src.begin();
auto const end = s->src.end();
s->error = 0;
if (log_cyclecount) { s->tstart = clock(); }
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->dst.size())) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f, temp_storage);
}
__syncthreads();
}
if (!t) {
statuses[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
statuses[strm_id].status = s->error;
if (log_cyclecount) {
statuses[strm_id].reserved = clock() - s->tstart;
} else {
statuses[strm_id].reserved = 0;
}
}
}
void gpu_unsnap(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(inputs.size(), 1); // TODO: Check max grid dimensions vs max expected count
unsnap_kernel<128><<<dim_grid, dim_block, 0, stream.value()>>>(inputs, outputs, statuses);
}
} // namespace io
} // namespace cudf
|
f5c0503d1c017af920efd2be4a0e38afe6bd78bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/roi_align_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/fluid/memory/memory.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static constexpr int kROISize = 4;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <class T>
__device__ T BilinearInterpolate(
const T* input_data, const int height, const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
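// Example: at y = 0.5, x = 0.5 all four corner weights are 0.25, so the result is the
// plain average of v1..v4; sample points outside the valid range (y < -1, y > height,
// and likewise for x) return zero via the early-out above.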
template <class T>
__global__ void GPURoiAlignForward(const int nthreads,
const T* input_data,
const T* input_rois,
const float spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
int* roi_batch_id_data,
T* output_data,
const bool continuous_coordinate) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0;
T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_xmax - roi_xmin;
T roi_height = roi_ymax - roi_ymin;
if (!continuous_coordinate) {
roi_width = max(roi_width, static_cast<T>(1.));
roi_height = max(roi_height, static_cast<T>(1.));
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1);
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
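// Sampling-grid note for the kernel above: with pooled_height = pooled_width = 7 and
// sampling_ratio = 2, each of the 49 output bins averages 2 x 2 = 4 bilinear samples;
// for sampling_ratio <= 0 the grid adapts to ceil(roi_height / pooled_height) by
// ceil(roi_width / pooled_width).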
template <typename T, typename Context>
void RoiAlignKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& boxes,
const paddle::optional<DenseTensor>& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
int sampling_ratio,
bool aligned,
DenseTensor* out) {
auto in_dims = x.dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = boxes.dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(dev_ctx, &threads, 256);
#endif
DenseTensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
int* roi_batch_id_data = dev_ctx.template HostAlloc<int>(&roi_batch_id_list);
auto cplace = phi::CPUPlace();
auto gplace = dev_ctx.GetPlace();
if (boxes_num) {
int boxes_batch_size = boxes_num->numel();
PADDLE_ENFORCE_EQ(
boxes_batch_size,
batch_size,
errors::InvalidArgument(
"The boxes_batch_size and imgs "
"batch_size must be the same. But received boxes_batch_size = %d, "
"batch_size = %d",
boxes_batch_size,
batch_size));
std::vector<int> boxes_num_list(boxes_batch_size);
paddle::memory::Copy(cplace,
boxes_num_list.data(),
gplace,
boxes_num->data<int>(),
sizeof(int) * boxes_batch_size,
0);
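    // Example: boxes_num = [3, 2] yields roi_batch_id_data = {0, 0, 0, 1, 1}, i.e. the
    // first three RoIs map to image 0 and the remaining two to image 1.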
int start = 0;
for (int n = 0; n < boxes_batch_size; ++n) {
for (int i = start; i < start + boxes_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += boxes_num_list[n];
}
} else {
auto lod = boxes.lod();
PADDLE_ENFORCE_EQ(lod.empty(),
false,
errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
"not contain LoD information."));
auto boxes_lod = lod.back();
int boxes_batch_size = boxes_lod.size() - 1;
PADDLE_ENFORCE_EQ(
boxes_batch_size,
batch_size,
errors::InvalidArgument(
"The batch size of rois and batch size "
"of images must be the same. But received rois batch size = %d, "
"and images batch size = %d",
boxes_batch_size,
batch_size));
int boxes_num_with_lod = boxes_lod[boxes_batch_size];
PADDLE_ENFORCE_EQ(
rois_num,
boxes_num_with_lod,
errors::InvalidArgument(
"The actual number of rois and the number of rois "
"provided from Input(RoIsLoD) in RoIAlign must be the same."
" But received actual number of rois is %d, and the number "
"of rois from RoIsLoD is %d",
rois_num,
boxes_num_with_lod));
for (int n = 0; n < boxes_batch_size; ++n) {
for (size_t i = boxes_lod[n]; i < boxes_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = paddle::memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
paddle::memory::Copy(
gplace, roi_id_data, cplace, roi_batch_id_data, bytes, dev_ctx.stream());
hipLaunchKernelGGL(( GPURoiAlignForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size,
x.data<T>(),
boxes.data<T>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
roi_id_data,
dev_ctx.template Alloc<T>(out),
aligned);
}
} // namespace phi
PD_REGISTER_KERNEL(
roi_align, GPU, ALL_LAYOUT, phi::RoiAlignKernel, float, double) {}
| f5c0503d1c017af920efd2be4a0e38afe6bd78bf.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/roi_align_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/fluid/memory/memory.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static constexpr int kROISize = 4;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <class T>
__device__ T BilinearInterpolate(
const T* input_data, const int height, const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <class T>
__global__ void GPURoiAlignForward(const int nthreads,
const T* input_data,
const T* input_rois,
const float spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
int* roi_batch_id_data,
T* output_data,
const bool continuous_coordinate) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0;
T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_xmax - roi_xmin;
T roi_height = roi_ymax - roi_ymin;
if (!continuous_coordinate) {
roi_width = max(roi_width, static_cast<T>(1.));
roi_height = max(roi_height, static_cast<T>(1.));
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1);
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
template <typename T, typename Context>
void RoiAlignKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& boxes,
const paddle::optional<DenseTensor>& boxes_num,
int pooled_height,
int pooled_width,
float spatial_scale,
int sampling_ratio,
bool aligned,
DenseTensor* out) {
auto in_dims = x.dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = boxes.dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
#ifdef WITH_NV_JETSON
backends::gpu::ChangeThreadNum(dev_ctx, &threads, 256);
#endif
DenseTensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
int* roi_batch_id_data = dev_ctx.template HostAlloc<int>(&roi_batch_id_list);
auto cplace = phi::CPUPlace();
auto gplace = dev_ctx.GetPlace();
if (boxes_num) {
int boxes_batch_size = boxes_num->numel();
PADDLE_ENFORCE_EQ(
boxes_batch_size,
batch_size,
errors::InvalidArgument(
"The boxes_batch_size and imgs "
"batch_size must be the same. But received boxes_batch_size = %d, "
"batch_size = %d",
boxes_batch_size,
batch_size));
std::vector<int> boxes_num_list(boxes_batch_size);
paddle::memory::Copy(cplace,
boxes_num_list.data(),
gplace,
boxes_num->data<int>(),
sizeof(int) * boxes_batch_size,
0);
int start = 0;
for (int n = 0; n < boxes_batch_size; ++n) {
for (int i = start; i < start + boxes_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += boxes_num_list[n];
}
} else {
auto lod = boxes.lod();
PADDLE_ENFORCE_EQ(lod.empty(),
false,
errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
"not contain LoD information."));
auto boxes_lod = lod.back();
int boxes_batch_size = boxes_lod.size() - 1;
PADDLE_ENFORCE_EQ(
boxes_batch_size,
batch_size,
errors::InvalidArgument(
"The batch size of rois and batch size "
"of images must be the same. But received rois batch size = %d, "
"and images batch size = %d",
boxes_batch_size,
batch_size));
int boxes_num_with_lod = boxes_lod[boxes_batch_size];
PADDLE_ENFORCE_EQ(
rois_num,
boxes_num_with_lod,
errors::InvalidArgument(
"The actual number of rois and the number of rois "
"provided from Input(RoIsLoD) in RoIAlign must be the same."
" But received actual number of rois is %d, and the number "
"of rois from RoIsLoD is %d",
rois_num,
boxes_num_with_lod));
for (int n = 0; n < boxes_batch_size; ++n) {
for (size_t i = boxes_lod[n]; i < boxes_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = paddle::memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
paddle::memory::Copy(
gplace, roi_id_data, cplace, roi_batch_id_data, bytes, dev_ctx.stream());
GPURoiAlignForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size,
x.data<T>(),
boxes.data<T>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
roi_id_data,
dev_ctx.template Alloc<T>(out),
aligned);
}
} // namespace phi
PD_REGISTER_KERNEL(
roi_align, GPU, ALL_LAYOUT, phi::RoiAlignKernel, float, double) {}
|
d26e07e81102e1df5cd7c803a09ba50450dc8f95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define SIZE 256
__global__ void staticReverse(int *d, int n)
{
  int s[SIZE]; // Note: without __shared__, s is a per-thread local array, so s[tr] read below is uninitialized for tr != t
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
// __syncthreads();
d[t] = s[tr];
}
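// For contrast, a shared-memory variant (hypothetical name sharedReverse, not part of
// this sample) would give all threads in the block one common array and needs the
// barrier that is commented out above:
//
// __global__ void sharedReverse(int *d, int n)
// {
//   __shared__ int s[SIZE];
//   int t = threadIdx.x;
//   int tr = n - t - 1;
//   s[t] = d[t];
//   __syncthreads(); // make every write to s visible before the reversed reads
//   d[t] = s[tr];
// }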
int main(void)
{
const int n = SIZE;
int a[n], r[n], d[n];
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
hipMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
hipMemcpy(d_d, a, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( staticReverse), dim3(1),dim3(n), 0, 0, d_d, n);
hipMemcpy(d, d_d, n*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
| d26e07e81102e1df5cd7c803a09ba50450dc8f95.cu | #include <stdio.h>
#define SIZE 256
__global__ void staticReverse(int *d, int n)
{
  int s[SIZE]; // Note: without __shared__, s is a per-thread local array, so s[tr] read below is uninitialized for tr != t
int t = threadIdx.x;
int tr = n-t-1;
s[t] = d[t];
// __syncthreads();
d[t] = s[tr];
}
int main(void)
{
const int n = SIZE;
int a[n], r[n], d[n];
for (int i = 0; i < n; i++) {
a[i] = i;
r[i] = n-i-1;
d[i] = 0;
}
int *d_d;
cudaMalloc(&d_d, n * sizeof(int));
// run version with static shared memory
cudaMemcpy(d_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
staticReverse<<<1,n>>>(d_d, n);
cudaMemcpy(d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
if (d[i] != r[i]) printf("Error: d[%d]!=r[%d] (%d, %d)\n", i, i, d[i], r[i]);
}
|
e399ff2e3790b61d4fd6957f7876dc4049f5fcf0.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2005 - 2015 Marc de Kamps
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
// USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// If you use this software in work leading to a scientific publication, you should include a reference there to
// the 'currently valid reference', which can be found at http://miind.sourceforge.net
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cmath>
#include "CudaEuler.cuh"
#include "CudaOde2DSystemAdapter.cuh"
using namespace CudaTwoDLib;
namespace {
const float tolerance = 1e-6;
}
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
CudaOde2DSystemAdapter::CudaOde2DSystemAdapter
(
TwoDLib::Ode2DSystemGroup& group,
MPILib::Time network_time_step
):
_group(group),
_time_step(group.MeshObjects()[0].TimeStep()),
_network_time_step(network_time_step),
_mesh_size(group.MeshObjects().size()),
_n(group.Mass().size()),
_hostmass(_n,0.),
_hostmap(_n,0.),
_offsets(group.Offsets()),
_nr_refractory_steps(group.MeshObjects().size(),0),
_refractory_prop(group.MeshObjects().size(),0),
_refractory_mass(group.MeshObjects().size(),0),
_nr_minimal_resets(_group.MeshObjects().size(),0),
_res_to_minimal(_group.MeshObjects().size(),0),
_res_from_ordered(_group.MeshObjects().size(),0),
_res_alpha_ordered(_group.MeshObjects().size(),0),
_res_from_counts(_group.MeshObjects().size(),0),
_res_from_offsets(_group.MeshObjects().size(),0),
_res_sum(group.MeshObjects().size(),0),
_res_to_mass(group.MeshObjects().size(),0),
_host_fs(group.MeshObjects().size(),0),
_blockSize(256),
_numBlocks( (_n + _blockSize - 1) / _blockSize)
{
this->FillMass();
this->FillMapData();
this->FillReversalMap(group.MeshObjects(),group.MapReversal());
this->FillRefractoryTimes(group.Tau_ref());
this->FillResetMap(group.MeshObjects(),group.MapReset());
}
CudaOde2DSystemAdapter::CudaOde2DSystemAdapter
(
TwoDLib::Ode2DSystemGroup& group
): CudaOde2DSystemAdapter(group, group.MeshObjects()[0].TimeStep())
{
}
void CudaOde2DSystemAdapter::TransferMapData()
{
for( inttype i = 0; i < _n; i++)
_hostmap[i] = _group.Map(i);
checkCudaErrors(hipMemcpy(_map,&_hostmap[0],_n*sizeof(inttype),hipMemcpyHostToDevice));
}
void CudaOde2DSystemAdapter::FillRefractoryTimes(const std::vector<MPILib::Time>& times) {
for(inttype m = 0; m < _mesh_size; m++){
_nr_refractory_steps[m] = 2 + static_cast<int>(::floor(times[m] / _network_time_step));
_refractory_prop[m] = std::abs(::fmod(times[m],_network_time_step) - _network_time_step) < 0.000001 ? 0 : ::fmod(times[m],_network_time_step)/_network_time_step;
}
}
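// Example: tau_ref = 2.5e-3 s with a network step of 1e-3 s gives 2 + floor(2.5) = 4
// refractory steps and a fractional remainder of 0.5; the epsilon test above only zeroes
// the fraction when fmod lands (numerically) on a whole step.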
void CudaOde2DSystemAdapter::FillMapData(){
checkCudaErrors(hipMalloc((inttype**)&_map,_n*sizeof(inttype)));
this->TransferMapData();
}
void CudaOde2DSystemAdapter::DeleteMass()
{
hipFree(_mass);
}
void CudaOde2DSystemAdapter::DeleteMapData()
{
hipFree(_map);
}
CudaOde2DSystemAdapter::~CudaOde2DSystemAdapter()
{
this->DeleteMass();
this->DeleteMapData();
this->DeleteReversalMap();
this->DeleteResetMap();
}
void CudaOde2DSystemAdapter::FillMass()
{
checkCudaErrors(hipMalloc((fptype**)&_mass,_n*sizeof(fptype)));
for(inttype i = 0; i < _n; i++)
_hostmass[i] = _group.Mass()[i];
this->Validate();
checkCudaErrors(hipMemcpy(_mass,&_hostmass[0],_n*sizeof(fptype),hipMemcpyHostToDevice));
}
void CudaOde2DSystemAdapter::Validate() const
{
    // check whether the mass array of the Ode2DSystemGroup has been initialized properly. This means the mass must
// add up to the number of meshes
fptype sum = 0.;
for(int i = 0; i < _n; i++)
sum += _hostmass[i];
fptype nmesh = static_cast<fptype>(_group.MeshObjects().size());
if (fabs(sum - nmesh ) > tolerance){
fprintf(stderr,"Total mass unequal to number of mesh objects:%f, %f\n",sum,nmesh);
exit(0);
}
}
void CudaOde2DSystemAdapter::Evolve()
{
_group.Evolve();
this->TransferMapData();
}
void CudaOde2DSystemAdapter::Evolve(std::vector<inttype>& meshes)
{
_group.Evolve(meshes);
this->TransferMapData();
}
void CudaOde2DSystemAdapter::EvolveWithoutMeshUpdate()
{
_group.EvolveWithoutMeshUpdate();
this->TransferMapData();
}
void CudaOde2DSystemAdapter::Dump(const std::vector<std::ostream*>& vec_stream, int mode)
{
checkCudaErrors(hipMemcpy(&_hostmass[0],_mass,_n*sizeof(fptype),hipMemcpyDeviceToHost));
for(inttype i = 0; i < _n; i++)
_group.Mass()[i] = _hostmass[i];
_group.Dump(vec_stream, mode);
}
void CudaOde2DSystemAdapter::updateGroupMass()
{
checkCudaErrors(hipMemcpy(&_hostmass[0],_mass,_n*sizeof(fptype),hipMemcpyDeviceToHost));
for(inttype i = 0; i < _n; i++){
_group.Mass()[i] = _hostmass[i];
}
}
const std::vector<fptype>& CudaOde2DSystemAdapter::F(unsigned int n_steps) const
{
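// The SumReset kernel leaves one partial sum per thread block in _res_sum[m];
// copy those partial sums back, reduce them on the host and convert the total
// reset mass into a firing rate by dividing by the elapsed simulated time.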
_host_fs.clear();
for(inttype m = 0; m < _mesh_size; m++)
{
inttype numBlocks = (_nr_minimal_resets[m] + _blockSize - 1)/_blockSize;
std::vector<fptype> host_sum(numBlocks,0.);
checkCudaErrors(hipMemcpy(&host_sum[0],_res_sum[m],numBlocks*sizeof(fptype),hipMemcpyDeviceToHost));
fptype sum = 0.0;
for (auto& rate: host_sum)
sum += rate;
_host_fs.push_back(sum/(_time_step*n_steps));
}
return _host_fs;
}
void CudaOde2DSystemAdapter::FillResetMap
(
const std::vector<TwoDLib::Mesh>& vec_mesh,
const std::vector<std::vector<TwoDLib::Redistribution> >& vec_vec_reset
)
{
hipMalloc(&_fs, _mesh_size*sizeof(fptype));
std::vector<fptype> vec_rates(_mesh_size,0.);
checkCudaErrors(hipMemcpy(_fs,&vec_rates[0],_mesh_size*sizeof(fptype),hipMemcpyHostToDevice));
for(inttype m = 0; m < _mesh_size; m++)
{
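// Group the reset redistributions by their target cell: reset_map takes each
// target index to the list of (source index, alpha) pairs that feed it, so a
// single thread can later handle all contributions to one target cell.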
std::map<inttype, std::vector<std::pair<inttype,fptype>>> reset_map;
for(inttype i = 0; i < vec_vec_reset[m].size(); i++){
reset_map[_group.Map(m,vec_vec_reset[m][i]._to[0], vec_vec_reset[m][i]._to[1])].push_back(
std::pair<inttype,fptype>(_group.Map(m,vec_vec_reset[m][i]._from[0],vec_vec_reset[m][i]._from[1]),
vec_vec_reset[m][i]._alpha));
}
_nr_minimal_resets[m] = reset_map.size();
_nr_resets.push_back(vec_vec_reset[m].size());
checkCudaErrors(hipMalloc((fptype**)&_refractory_mass[m], _nr_refractory_steps[m]*vec_vec_reset[m].size()*sizeof(fptype)));
checkCudaErrors(hipMalloc((inttype**)&_res_to_minimal[m], _nr_minimal_resets[m]*sizeof(inttype)));
checkCudaErrors(hipMalloc((inttype**)&_res_from_ordered[m], vec_vec_reset[m].size()*sizeof(inttype)));
checkCudaErrors(hipMalloc((fptype**)&_res_alpha_ordered[m], vec_vec_reset[m].size()*sizeof(fptype)));
checkCudaErrors(hipMalloc((fptype**)&_res_from_counts[m], _nr_minimal_resets[m]*sizeof(fptype)));
checkCudaErrors(hipMalloc((fptype**)&_res_from_offsets[m], _nr_minimal_resets[m]*sizeof(fptype)));
checkCudaErrors(hipMalloc((fptype**)&_res_to_mass[m],_nr_minimal_resets[m]*sizeof(fptype)));
inttype numBlocks = (_nr_minimal_resets[m] + _blockSize - 1)/_blockSize;
checkCudaErrors(hipMalloc((fptype**)&_res_sum[m], numBlocks*sizeof(fptype)));
std::vector<inttype> vec_to_min;
std::vector<inttype> vec_from_ord;
std::vector<fptype> vec_alpha_ord;
std::vector<inttype> counts;
std::vector<inttype> offsets;
unsigned int offset_count = 0;
std::map<inttype, std::vector<std::pair<inttype,fptype>>>::iterator it;
for ( it = reset_map.begin(); it != reset_map.end(); it++ ){
vec_to_min.push_back(it->first);
counts.push_back(it->second.size());
offsets.push_back(offset_count);
offset_count += it->second.size();
for(int i=0; i<it->second.size(); i++){
vec_from_ord.push_back(it->second[i].first);
vec_alpha_ord.push_back(it->second[i].second);
}
}
checkCudaErrors(hipMemcpy(_res_to_minimal[m],&vec_to_min[0],vec_to_min.size()*sizeof(inttype),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_res_from_ordered[m],&vec_from_ord[0],vec_from_ord.size()*sizeof(inttype),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_res_alpha_ordered[m],&vec_alpha_ord[0],vec_alpha_ord.size()*sizeof(fptype),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_res_from_counts[m],&counts[0],counts.size()*sizeof(inttype),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_res_from_offsets[m],&offsets[0],offsets.size()*sizeof(inttype),hipMemcpyHostToDevice));
}
}
void CudaOde2DSystemAdapter::RedistributeProbability(std::vector<inttype>& meshes)
{
for(inttype i = 0; i < meshes.size(); i++)
{
inttype m = meshes[i];
// be careful to use this block size
inttype numBlocks = (_nr_minimal_resets[m] + _blockSize - 1)/_blockSize;
inttype numSumBlocks = (numBlocks + _blockSize - 1)/_blockSize;
inttype numResetBlocks = (_nr_resets[m] + _blockSize - 1)/_blockSize;
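// Per-mesh kernel sequence: clear the per-target mass and per-block sum buffers,
// shift the refractory queue by one step, move freshly reset mass into the queue,
// accumulate the queued mass per target cell, apply the two oldest refractory
// bins to the target cells weighted by _refractory_prop, and block-reduce the
// reset mass into _res_sum[m] for the rate calculation in F().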
hipLaunchKernelGGL(( CudaClearDerivative), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_minimal_resets[m],_res_to_mass[m],_mass);
hipLaunchKernelGGL(( CudaClearDerivative), dim3(numSumBlocks),dim3(_blockSize), 0, 0, numBlocks,_res_sum[m],_mass);
for(int t = _nr_refractory_steps[m]-2; t >= 0; t--){
hipLaunchKernelGGL(( MapResetShiftRefractory), dim3(numResetBlocks),dim3(_blockSize), 0, 0, _nr_resets[m],_refractory_mass[m], t*_nr_resets[m]);
}
hipLaunchKernelGGL(( MapResetToRefractory), dim3(numResetBlocks),dim3(_blockSize), 0, 0, _nr_resets[m],_res_from_ordered[m], _mass, _map, _refractory_mass[m]);
hipLaunchKernelGGL(( GetResetMass), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_minimal_resets[m], _res_to_mass[m], _refractory_mass[m],
_res_alpha_ordered[m], _res_from_offsets[m], _res_from_counts[m]);
hipLaunchKernelGGL(( MapResetThreaded), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_minimal_resets[m], _mass, _refractory_mass[m],
(_nr_refractory_steps[m]-1)*_nr_resets[m],
_res_to_minimal[m],_res_alpha_ordered[m], _res_from_offsets[m], _res_from_counts[m], _map, _refractory_prop[m]);
hipLaunchKernelGGL(( MapResetThreaded), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_minimal_resets[m], _mass, _refractory_mass[m],
(_nr_refractory_steps[m]-2)*_nr_resets[m],
_res_to_minimal[m],_res_alpha_ordered[m], _res_from_offsets[m], _res_from_counts[m], _map, 1.0 - _refractory_prop[m]);
hipLaunchKernelGGL(( SumReset), dim3(numBlocks),dim3(_blockSize),_blockSize*sizeof(fptype), 0, _nr_minimal_resets[m],_res_to_mass[m],_res_sum[m]);
}
hipDeviceSynchronize();
}
void CudaOde2DSystemAdapter::RedistributeProbability()
{
std::vector<inttype> meshes(_mesh_size);
for(int i=0;i<_mesh_size;i++)
meshes[i] = i;
RedistributeProbability(meshes);
}
void CudaOde2DSystemAdapter::MapFinish(std::vector<inttype>& meshes)
{
for(inttype i = 0; i < meshes.size(); i++)
{
inttype m = meshes[i];
// be careful to use this block size
inttype numBlocks = (_nr_resets[m] + _blockSize - 1)/_blockSize;
hipLaunchKernelGGL(( ResetFinishThreaded), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_resets[m],_res_from_ordered[m],_mass,_map);
}
hipDeviceSynchronize();
}
void CudaOde2DSystemAdapter::MapFinish()
{
std::vector<inttype> meshes(_mesh_size);
for(int i=0;i<_mesh_size;i++)
meshes[i] = i;
MapFinish(meshes);
}
void CudaOde2DSystemAdapter::FillReversalMap
(
const std::vector<TwoDLib::Mesh>& vec_mesh,
const std::vector<std::vector<TwoDLib::Redistribution> >& vec_vec_reversal
)
{
_n_rev = 0;
for(inttype m = 0; m < vec_mesh.size(); m++)
_n_rev += vec_vec_reversal[m].size();
hipMallocManaged(&_rev_to, _n_rev*sizeof(inttype));
hipMallocManaged(&_rev_from, _n_rev*sizeof(inttype));
hipMallocManaged(&_rev_alpha, _n_rev*sizeof(fptype));
inttype index = 0;
for(inttype m = 0; m < vec_mesh.size(); m++){
for( const TwoDLib::Redistribution& r: vec_vec_reversal[m] ){
_rev_to[index] = _group.Map(m,r._to[0],r._to[1]);
_rev_from[index] = _group.Map(m,r._from[0],r._from[1]);
_rev_alpha[index] = r._alpha;
index++;
}
}
}
void CudaOde2DSystemAdapter::RemapReversal()
{
hipLaunchKernelGGL(( MapReversal), dim3(1),dim3(1), 0, 0, _n_rev, _rev_from, _rev_to, _rev_alpha, _mass, _map);
hipDeviceSynchronize();
}
void CudaOde2DSystemAdapter::DeleteResetMap()
{
hipFree(_fs);
for(inttype m = 0; m < _mesh_size; m++)
{
hipFree(_res_to_minimal[m]);
hipFree(_res_from_ordered[m]);
hipFree(_res_from_counts[m]);
hipFree(_res_alpha_ordered[m]);
hipFree(_res_from_offsets[m]);
hipFree(_res_to_mass[m]);
hipFree(_res_sum[m]);
}
}
void CudaOde2DSystemAdapter::DeleteReversalMap()
{
hipFree(_rev_to);
hipFree(_rev_from);
hipFree(_rev_alpha);
}
| e399ff2e3790b61d4fd6957f7876dc4049f5fcf0.cu | // Copyright (c) 2005 - 2015 Marc de Kamps
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
// USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// If you use this software in work leading to a scientific publication, you should include a reference there to
// the 'currently valid reference', which can be found at http://miind.sourceforge.net
#include <iostream>
#include <cuda_runtime.h>
#include <cstdio>
#include <cmath>
#include "CudaEuler.cuh"
#include "CudaOde2DSystemAdapter.cuh"
using namespace CudaTwoDLib;
namespace {
const float tolerance = 1e-6;
}
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
CudaOde2DSystemAdapter::CudaOde2DSystemAdapter
(
TwoDLib::Ode2DSystemGroup& group,
MPILib::Time network_time_step
):
_group(group),
_time_step(group.MeshObjects()[0].TimeStep()),
_network_time_step(network_time_step),
_mesh_size(group.MeshObjects().size()),
_n(group.Mass().size()),
_hostmass(_n,0.),
_hostmap(_n,0.),
_offsets(group.Offsets()),
_nr_refractory_steps(group.MeshObjects().size(),0),
_refractory_prop(group.MeshObjects().size(),0),
_refractory_mass(group.MeshObjects().size(),0),
_nr_minimal_resets(_group.MeshObjects().size(),0),
_res_to_minimal(_group.MeshObjects().size(),0),
_res_from_ordered(_group.MeshObjects().size(),0),
_res_alpha_ordered(_group.MeshObjects().size(),0),
_res_from_counts(_group.MeshObjects().size(),0),
_res_from_offsets(_group.MeshObjects().size(),0),
_res_sum(group.MeshObjects().size(),0),
_res_to_mass(group.MeshObjects().size(),0),
_host_fs(group.MeshObjects().size(),0),
_blockSize(256),
_numBlocks( (_n + _blockSize - 1) / _blockSize)
{
this->FillMass();
this->FillMapData();
this->FillReversalMap(group.MeshObjects(),group.MapReversal());
this->FillRefractoryTimes(group.Tau_ref());
this->FillResetMap(group.MeshObjects(),group.MapReset());
}
CudaOde2DSystemAdapter::CudaOde2DSystemAdapter
(
TwoDLib::Ode2DSystemGroup& group
): CudaOde2DSystemAdapter(group, group.MeshObjects()[0].TimeStep())
{
}
void CudaOde2DSystemAdapter::TransferMapData()
{
for( inttype i = 0; i < _n; i++)
_hostmap[i] = _group.Map(i);
checkCudaErrors(cudaMemcpy(_map,&_hostmap[0],_n*sizeof(inttype),cudaMemcpyHostToDevice));
}
void CudaOde2DSystemAdapter::FillRefractoryTimes(const std::vector<MPILib::Time>& times) {
for(inttype m = 0; m < _mesh_size; m++){
_nr_refractory_steps[m] = 2 + static_cast<int>(std::floor(times[m] / _network_time_step));
_refractory_prop[m] = std::abs(std::fmod(times[m],_network_time_step) - _network_time_step) < 0.000001 ? 0 : std::fmod(times[m],_network_time_step)/_network_time_step;
}
}
void CudaOde2DSystemAdapter::FillMapData(){
checkCudaErrors(cudaMalloc((inttype**)&_map,_n*sizeof(inttype)));
this->TransferMapData();
}
void CudaOde2DSystemAdapter::DeleteMass()
{
cudaFree(_mass);
}
void CudaOde2DSystemAdapter::DeleteMapData()
{
cudaFree(_map);
}
CudaOde2DSystemAdapter::~CudaOde2DSystemAdapter()
{
this->DeleteMass();
this->DeleteMapData();
this->DeleteReversalMap();
this->DeleteResetMap();
}
void CudaOde2DSystemAdapter::FillMass()
{
checkCudaErrors(cudaMalloc((fptype**)&_mass,_n*sizeof(fptype)));
for(inttype i = 0; i < _n; i++)
_hostmass[i] = _group.Mass()[i];
this->Validate();
checkCudaErrors(cudaMemcpy(_mass,&_hostmass[0],_n*sizeof(fptype),cudaMemcpyHostToDevice));
}
void CudaOde2DSystemAdapter::Validate() const
{
// check whether the mass array of the Ode2DSystemGroup has been initialized properly. This means the mass must
// add up to the number of meshes
fptype sum = 0.;
for(int i = 0; i < _n; i++)
sum += _hostmass[i];
fptype nmesh = static_cast<fptype>(_group.MeshObjects().size());
if (fabs(sum - nmesh ) > tolerance){
fprintf(stderr,"Total mass unequal to number of mesh objects:%f, %f\n",sum,nmesh);
exit(0);
}
}
void CudaOde2DSystemAdapter::Evolve()
{
_group.Evolve();
this->TransferMapData();
}
void CudaOde2DSystemAdapter::Evolve(std::vector<inttype>& meshes)
{
_group.Evolve(meshes);
this->TransferMapData();
}
void CudaOde2DSystemAdapter::EvolveWithoutMeshUpdate()
{
_group.EvolveWithoutMeshUpdate();
this->TransferMapData();
}
void CudaOde2DSystemAdapter::Dump(const std::vector<std::ostream*>& vec_stream, int mode)
{
checkCudaErrors(cudaMemcpy(&_hostmass[0],_mass,_n*sizeof(fptype),cudaMemcpyDeviceToHost));
for(inttype i = 0; i < _n; i++)
_group.Mass()[i] = _hostmass[i];
_group.Dump(vec_stream, mode);
}
void CudaOde2DSystemAdapter::updateGroupMass()
{
checkCudaErrors(cudaMemcpy(&_hostmass[0],_mass,_n*sizeof(fptype),cudaMemcpyDeviceToHost));
for(inttype i = 0; i < _n; i++){
_group.Mass()[i] = _hostmass[i];
}
}
const std::vector<fptype>& CudaOde2DSystemAdapter::F(unsigned int n_steps) const
{
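// The SumReset kernel leaves one partial sum per thread block in _res_sum[m];
// copy those partial sums back, reduce them on the host and convert the total
// reset mass into a firing rate by dividing by the elapsed simulated time.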
_host_fs.clear();
for(inttype m = 0; m < _mesh_size; m++)
{
inttype numBlocks = (_nr_minimal_resets[m] + _blockSize - 1)/_blockSize;
std::vector<fptype> host_sum(numBlocks,0.);
checkCudaErrors(cudaMemcpy(&host_sum[0],_res_sum[m],numBlocks*sizeof(fptype),cudaMemcpyDeviceToHost));
fptype sum = 0.0;
for (auto& rate: host_sum)
sum += rate;
_host_fs.push_back(sum/(_time_step*n_steps));
}
return _host_fs;
}
void CudaOde2DSystemAdapter::FillResetMap
(
const std::vector<TwoDLib::Mesh>& vec_mesh,
const std::vector<std::vector<TwoDLib::Redistribution> >& vec_vec_reset
)
{
cudaMalloc(&_fs, _mesh_size*sizeof(fptype));
std::vector<fptype> vec_rates(_mesh_size,0.);
checkCudaErrors(cudaMemcpy(_fs,&vec_rates[0],_mesh_size*sizeof(fptype),cudaMemcpyHostToDevice));
for(inttype m = 0; m < _mesh_size; m++)
{
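// Group the reset redistributions by their target cell: reset_map takes each
// target index to the list of (source index, alpha) pairs that feed it, so a
// single thread can later handle all contributions to one target cell.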
std::map<inttype, std::vector<std::pair<inttype,fptype>>> reset_map;
for(inttype i = 0; i < vec_vec_reset[m].size(); i++){
reset_map[_group.Map(m,vec_vec_reset[m][i]._to[0], vec_vec_reset[m][i]._to[1])].push_back(
std::pair<inttype,fptype>(_group.Map(m,vec_vec_reset[m][i]._from[0],vec_vec_reset[m][i]._from[1]),
vec_vec_reset[m][i]._alpha));
}
_nr_minimal_resets[m] = reset_map.size();
_nr_resets.push_back(vec_vec_reset[m].size());
checkCudaErrors(cudaMalloc((fptype**)&_refractory_mass[m], _nr_refractory_steps[m]*vec_vec_reset[m].size()*sizeof(fptype)));
checkCudaErrors(cudaMalloc((inttype**)&_res_to_minimal[m], _nr_minimal_resets[m]*sizeof(inttype)));
checkCudaErrors(cudaMalloc((inttype**)&_res_from_ordered[m], vec_vec_reset[m].size()*sizeof(inttype)));
checkCudaErrors(cudaMalloc((fptype**)&_res_alpha_ordered[m], vec_vec_reset[m].size()*sizeof(fptype)));
checkCudaErrors(cudaMalloc((fptype**)&_res_from_counts[m], _nr_minimal_resets[m]*sizeof(fptype)));
checkCudaErrors(cudaMalloc((fptype**)&_res_from_offsets[m], _nr_minimal_resets[m]*sizeof(fptype)));
checkCudaErrors(cudaMalloc((fptype**)&_res_to_mass[m],_nr_minimal_resets[m]*sizeof(fptype)));
inttype numBlocks = (_nr_minimal_resets[m] + _blockSize - 1)/_blockSize;
checkCudaErrors(cudaMalloc((fptype**)&_res_sum[m], numBlocks*sizeof(fptype)));
std::vector<inttype> vec_to_min;
std::vector<inttype> vec_from_ord;
std::vector<fptype> vec_alpha_ord;
std::vector<inttype> counts;
std::vector<inttype> offsets;
unsigned int offset_count = 0;
std::map<inttype, std::vector<std::pair<inttype,fptype>>>::iterator it;
for ( it = reset_map.begin(); it != reset_map.end(); it++ ){
vec_to_min.push_back(it->first);
counts.push_back(it->second.size());
offsets.push_back(offset_count);
offset_count += it->second.size();
for(int i=0; i<it->second.size(); i++){
vec_from_ord.push_back(it->second[i].first);
vec_alpha_ord.push_back(it->second[i].second);
}
}
checkCudaErrors(cudaMemcpy(_res_to_minimal[m],&vec_to_min[0],vec_to_min.size()*sizeof(inttype),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_res_from_ordered[m],&vec_from_ord[0],vec_from_ord.size()*sizeof(inttype),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_res_alpha_ordered[m],&vec_alpha_ord[0],vec_alpha_ord.size()*sizeof(fptype),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_res_from_counts[m],&counts[0],counts.size()*sizeof(inttype),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_res_from_offsets[m],&offsets[0],offsets.size()*sizeof(inttype),cudaMemcpyHostToDevice));
}
}
void CudaOde2DSystemAdapter::RedistributeProbability(std::vector<inttype>& meshes)
{
for(inttype i = 0; i < meshes.size(); i++)
{
inttype m = meshes[i];
// be careful to use this block size
inttype numBlocks = (_nr_minimal_resets[m] + _blockSize - 1)/_blockSize;
inttype numSumBlocks = (numBlocks + _blockSize - 1)/_blockSize;
inttype numResetBlocks = (_nr_resets[m] + _blockSize - 1)/_blockSize;
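// Per-mesh kernel sequence: clear the per-target mass and per-block sum buffers,
// shift the refractory queue by one step, move freshly reset mass into the queue,
// accumulate the queued mass per target cell, apply the two oldest refractory
// bins to the target cells weighted by _refractory_prop, and block-reduce the
// reset mass into _res_sum[m] for the rate calculation in F().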
CudaClearDerivative<<<numBlocks,_blockSize>>>(_nr_minimal_resets[m],_res_to_mass[m],_mass);
CudaClearDerivative<<<numSumBlocks,_blockSize>>>(numBlocks,_res_sum[m],_mass);
for(int t = _nr_refractory_steps[m]-2; t >= 0; t--){
MapResetShiftRefractory<<<numResetBlocks,_blockSize>>>(_nr_resets[m],_refractory_mass[m], t*_nr_resets[m]);
}
MapResetToRefractory<<<numResetBlocks,_blockSize>>>(_nr_resets[m],_res_from_ordered[m], _mass, _map, _refractory_mass[m]);
GetResetMass<<<numBlocks,_blockSize>>>(_nr_minimal_resets[m], _res_to_mass[m], _refractory_mass[m],
_res_alpha_ordered[m], _res_from_offsets[m], _res_from_counts[m]);
MapResetThreaded<<<numBlocks,_blockSize>>>(_nr_minimal_resets[m], _mass, _refractory_mass[m],
(_nr_refractory_steps[m]-1)*_nr_resets[m],
_res_to_minimal[m],_res_alpha_ordered[m], _res_from_offsets[m], _res_from_counts[m], _map, _refractory_prop[m]);
MapResetThreaded<<<numBlocks,_blockSize>>>(_nr_minimal_resets[m], _mass, _refractory_mass[m],
(_nr_refractory_steps[m]-2)*_nr_resets[m],
_res_to_minimal[m],_res_alpha_ordered[m], _res_from_offsets[m], _res_from_counts[m], _map, 1.0 - _refractory_prop[m]);
SumReset<<<numBlocks,_blockSize,_blockSize*sizeof(fptype)>>>(_nr_minimal_resets[m],_res_to_mass[m],_res_sum[m]);
}
cudaDeviceSynchronize();
}
void CudaOde2DSystemAdapter::RedistributeProbability()
{
std::vector<inttype> meshes(_mesh_size);
for(int i=0;i<_mesh_size;i++)
meshes[i] = i;
RedistributeProbability(meshes);
}
void CudaOde2DSystemAdapter::MapFinish(std::vector<inttype>& meshes)
{
for(inttype i = 0; i < meshes.size(); i++)
{
inttype m = meshes[i];
// be careful to use this block size
inttype numBlocks = (_nr_resets[m] + _blockSize - 1)/_blockSize;
ResetFinishThreaded<<<numBlocks,_blockSize>>>(_nr_resets[m],_res_from_ordered[m],_mass,_map);
}
cudaDeviceSynchronize();
}
void CudaOde2DSystemAdapter::MapFinish()
{
std::vector<inttype> meshes(_mesh_size);
for(int i=0;i<_mesh_size;i++)
meshes[i] = i;
MapFinish(meshes);
}
void CudaOde2DSystemAdapter::FillReversalMap
(
const std::vector<TwoDLib::Mesh>& vec_mesh,
const std::vector<std::vector<TwoDLib::Redistribution> >& vec_vec_reversal
)
{
_n_rev = 0;
for(inttype m = 0; m < vec_mesh.size(); m++)
_n_rev += vec_vec_reversal[m].size();
cudaMallocManaged(&_rev_to, _n_rev*sizeof(inttype));
cudaMallocManaged(&_rev_from, _n_rev*sizeof(inttype));
cudaMallocManaged(&_rev_alpha, _n_rev*sizeof(fptype));
inttype index = 0;
for(inttype m = 0; m < vec_mesh.size(); m++){
for( const TwoDLib::Redistribution& r: vec_vec_reversal[m] ){
_rev_to[index] = _group.Map(m,r._to[0],r._to[1]);
_rev_from[index] = _group.Map(m,r._from[0],r._from[1]);
_rev_alpha[index] = r._alpha;
index++;
}
}
}
void CudaOde2DSystemAdapter::RemapReversal()
{
MapReversal<<<1,1>>>(_n_rev, _rev_from, _rev_to, _rev_alpha, _mass, _map);
cudaDeviceSynchronize();
}
void CudaOde2DSystemAdapter::DeleteResetMap()
{
cudaFree(_fs);
for(inttype m = 0; m < _mesh_size; m++)
{
cudaFree(_res_to_minimal[m]);
cudaFree(_res_from_ordered[m]);
cudaFree(_res_from_counts[m]);
cudaFree(_res_alpha_ordered[m]);
cudaFree(_res_from_offsets[m]);
cudaFree(_res_to_mass[m]);
cudaFree(_res_sum[m]);
}
}
void CudaOde2DSystemAdapter::DeleteReversalMap()
{
cudaFree(_rev_to);
cudaFree(_rev_from);
cudaFree(_rev_alpha);
}
|
514036cd182c41bd3e54db6c2dd24d466db2110d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
cout << "Error at line " << line << " : " << hipGetErrorString(ret) << endl;
exit(-1);
}
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
LONG row = threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; n++)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
__global__ void gpuMM_um(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
LONG row = blockIdx.y*blockDim.y + threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; ++n)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
struct timeval t1,t2, tnp, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K;
cin >> K;
N = K*BLOCK_SIZE;
CUDA_SAFE_CALL(hipSetDevice(0));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
srand48(time(NULL)); // seed drand48(), which generates the matrix entries below
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = drand48();
hB[j*N+i] = drand48();
}
}
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
dim3 threadBlock(BLOCK_SIZE,K);
dim3 grid(K);
double *dA,*dB,*dC,*dAT,*dCT,*dTemp;
/* With prefetching begins */
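// Double-buffered pipeline over K-row slabs of A and C: stream s1 runs the
// kernel on the current slab (dA/dC) while stream s2 prefetches the next slab
// of A into dAT and stream s3 drains the previous result from dCT back to the
// host; the buffer pointers are swapped after each synchronization.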
CUDA_SAFE_CALL(hipHostMalloc(&dB,size));
CUDA_SAFE_CALL(hipHostMalloc(&dA,(K*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dC,(K*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dAT,(K*size/N)));
CUDA_SAFE_CALL(hipHostMalloc(&dCT,(K*size/N)));
hipStream_t s1,s2,s3;
CUDA_SAFE_CALL(hipStreamCreate(&s1));
CUDA_SAFE_CALL(hipStreamCreate(&s2));
CUDA_SAFE_CALL(hipStreamCreate(&s3));
gettimeofday(&t1,0);
// Copy matrices from the host to device
CUDA_SAFE_CALL(hipMemcpyAsync(dB,hB,size,hipMemcpyHostToDevice,s1));
CUDA_SAFE_CALL(hipMemcpyAsync(dA,hA,K*(size/N),hipMemcpyHostToDevice,s1));
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,s1, dA,dB,dC,N);
for(LONG i=1; i< (N/K); i++){
// Prefetch the next set of rows
CUDA_SAFE_CALL(hipMemcpyAsync(dAT,hA+i*N*K,(K*size/N),hipMemcpyHostToDevice,s2));
CUDA_SAFE_CALL(hipDeviceSynchronize());
//Swap pointers
dTemp = dAT;
dAT = dA;
dA = dTemp;
dTemp = dCT;
dCT = dC;
dC = dTemp;
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock),0,s1, dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpyAsync(C+(i-1)*N*K,dCT,(K*size/N),hipMemcpyDeviceToHost,s3));
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipMemcpyAsync(C+((N/K)-1)*N*K,dC,(K*size/N),hipMemcpyDeviceToHost,s3));
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(hipStreamDestroy(s1));
CUDA_SAFE_CALL(hipStreamDestroy(s2));
CUDA_SAFE_CALL(hipStreamDestroy(s3));
CUDA_SAFE_CALL(hipHostFree(dB));
CUDA_SAFE_CALL(hipHostFree(dA));
CUDA_SAFE_CALL(hipHostFree(dC));
CUDA_SAFE_CALL(hipHostFree(dAT));
CUDA_SAFE_CALL(hipHostFree(dCT));
/* Without prefetching begins */
CUDA_SAFE_CALL(hipMalloc(&dB,size));
CUDA_SAFE_CALL(hipMalloc(&dA,(K*size/N)));
CUDA_SAFE_CALL(hipMalloc(&dC,(K*size/N)));
gettimeofday(&t1,0);
CUDA_SAFE_CALL(hipMemcpy(dB,hB,size,hipMemcpyHostToDevice));
for(LONG i=0; i< (N/K); i++){
//cout << "Iteration " << i << endl;
CUDA_SAFE_CALL(hipMemcpy(dA,hA+i*N*K,(K*size/N),hipMemcpyHostToDevice));
//Execute the matrix multiplication kernel
hipLaunchKernelGGL(( gpuMM), dim3(grid),dim3(threadBlock), 0, 0, dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(hipMemcpy(C+i*N*K,dC,(K*size/N),hipMemcpyDeviceToHost));
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tnp);
tt = (double) tnp.tv_sec + ((double) tnp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Without Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(hipFree(dB));
CUDA_SAFE_CALL(hipFree(dA));
CUDA_SAFE_CALL(hipFree(dC));
/* With Managed memory begins */
CUDA_SAFE_CALL(hipMallocManaged(&dA,size));
CUDA_SAFE_CALL(hipMallocManaged(&dB,size));
CUDA_SAFE_CALL(hipMallocManaged(&dC,size));
dim3 threadBlock_um(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid_um(K,K);
// Initialize matrices
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
dA[j*N+i] = 2.f*(j+i);
dB[j*N+i] = 1.f*(j-i);
}
}
gettimeofday(&t1,0);
hipLaunchKernelGGL(( gpuMM_um), dim3(grid_um),dim3(threadBlock_um), 0, 0, dA,dB,dC,N);
CUDA_SAFE_CALL(hipDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Managed : " << gflops << endl;
CUDA_SAFE_CALL(hipFree(dA));
CUDA_SAFE_CALL(hipFree(dB));
CUDA_SAFE_CALL(hipFree(dC));
delete [] hA;
delete [] hB;
delete [] hC;
delete [] C;
cout << "Finished." << endl;
return 0;
}
| 514036cd182c41bd3e54db6c2dd24d466db2110d.cu | #include <iostream>
#include <cmath>
#include <cstdio>
#include <sys/time.h>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define BLOCK_SIZE 32
#define ERROR 1.0e-9
typedef unsigned long long int LONG;
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
cout << "Error at line " << line << " : " << cudaGetErrorString(ret) << endl;
exit(-1);
}
}
void printMat(double *A, LONG N)
{
LONG i,j;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
cout << A[i*N+j] << " ";
cout<<endl;
}
}
__global__ void gpuMM(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
LONG row = threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; n++)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
__global__ void gpuMM_um(double *A, double *B, double *C, LONG N)
{
// Matrix multiplication for NxN matrices C=A*B
// Each thread computes a single element of C
LONG row = blockIdx.y*blockDim.y + threadIdx.y;
LONG col = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.f;
for (LONG n = 0; n < N; ++n)
sum += A[row*N+n]*B[n*N+col];
C[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
struct timeval t1,t2, tnp, tp;
double tt, gflops;
// Perform matrix multiplication C = A*B
// where A, B and C are NxN matrices
// Restricted to matrices where N = K*BLOCK_SIZE;
LONG N,K;
cin >> K;
N = K*BLOCK_SIZE;
CUDA_SAFE_CALL(cudaSetDevice(0));
cout << "Executing Matrix Multiplcation" << endl;
cout << "Matrix size: " << N << "x" << N << endl;
// Allocate memory on the host
double *hA,*hB,*hC;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
// Initialize matrices on the host
srand48(time(NULL)); // seed drand48(), which generates the matrix entries below
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
hA[j*N+i] = drand48();
hB[j*N+i] = drand48();
}
}
// Allocate memory on the device
LONG size = N*N*sizeof(double); // Size of the memory in bytes
// Allocate memory to store the GPU answer on the host
double *C;
C = new double[N*N];
dim3 threadBlock(BLOCK_SIZE,K);
dim3 grid(K);
double *dA,*dB,*dC,*dAT,*dCT,*dTemp;
/* With prefetching begins */
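// Double-buffered pipeline over K-row slabs of A and C: stream s1 runs the
// kernel on the current slab (dA/dC) while stream s2 prefetches the next slab
// of A into dAT and stream s3 drains the previous result from dCT back to the
// host; the buffer pointers are swapped after each synchronization.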
CUDA_SAFE_CALL(cudaMallocHost(&dB,size));
CUDA_SAFE_CALL(cudaMallocHost(&dA,(K*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dC,(K*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dAT,(K*size/N)));
CUDA_SAFE_CALL(cudaMallocHost(&dCT,(K*size/N)));
cudaStream_t s1,s2,s3;
CUDA_SAFE_CALL(cudaStreamCreate(&s1));
CUDA_SAFE_CALL(cudaStreamCreate(&s2));
CUDA_SAFE_CALL(cudaStreamCreate(&s3));
gettimeofday(&t1,0);
// Copy matrices from the host to device
CUDA_SAFE_CALL(cudaMemcpyAsync(dB,hB,size,cudaMemcpyHostToDevice,s1));
CUDA_SAFE_CALL(cudaMemcpyAsync(dA,hA,K*(size/N),cudaMemcpyHostToDevice,s1));
gpuMM<<<grid,threadBlock,0,s1>>>(dA,dB,dC,N);
for(LONG i=1; i< (N/K); i++){
// Prefetch the next set of rows
CUDA_SAFE_CALL(cudaMemcpyAsync(dAT,hA+i*N*K,(K*size/N),cudaMemcpyHostToDevice,s2));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
//Swap pointers
dTemp = dAT;
dAT = dA;
dA = dTemp;
dTemp = dCT;
dCT = dC;
dC = dTemp;
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock,0,s1>>>(dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpyAsync(C+(i-1)*N*K,dCT,(K*size/N),cudaMemcpyDeviceToHost,s3));
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaMemcpyAsync(C+((N/K)-1)*N*K,dC,(K*size/N),cudaMemcpyDeviceToHost,s3));
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(cudaStreamDestroy(s1));
CUDA_SAFE_CALL(cudaStreamDestroy(s2));
CUDA_SAFE_CALL(cudaStreamDestroy(s3));
CUDA_SAFE_CALL(cudaFreeHost(dB));
CUDA_SAFE_CALL(cudaFreeHost(dA));
CUDA_SAFE_CALL(cudaFreeHost(dC));
CUDA_SAFE_CALL(cudaFreeHost(dAT));
CUDA_SAFE_CALL(cudaFreeHost(dCT));
/* Without prefetching begins */
CUDA_SAFE_CALL(cudaMalloc(&dB,size));
CUDA_SAFE_CALL(cudaMalloc(&dA,(K*size/N)));
CUDA_SAFE_CALL(cudaMalloc(&dC,(K*size/N)));
gettimeofday(&t1,0);
CUDA_SAFE_CALL(cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice));
for(LONG i=0; i< (N/K); i++){
//cout << "Iteration " << i << endl;
CUDA_SAFE_CALL(cudaMemcpy(dA,hA+i*N*K,(K*size/N),cudaMemcpyHostToDevice));
//Execute the matrix multiplication kernel
gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);
// Now copy the GPU result back to CPU
CUDA_SAFE_CALL(cudaMemcpy(C+i*N*K,dC,(K*size/N),cudaMemcpyDeviceToHost));
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tnp);
tt = (double) tnp.tv_sec + ((double) tnp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Without Prefetch : " << gflops << endl;
CUDA_SAFE_CALL(cudaFree(dB));
CUDA_SAFE_CALL(cudaFree(dA));
CUDA_SAFE_CALL(cudaFree(dC));
/* With Managed memory begins */
CUDA_SAFE_CALL(cudaMallocManaged(&dA,size));
CUDA_SAFE_CALL(cudaMallocManaged(&dB,size));
CUDA_SAFE_CALL(cudaMallocManaged(&dC,size));
dim3 threadBlock_um(BLOCK_SIZE,BLOCK_SIZE);
dim3 grid_um(K,K);
// Initialize matrices
for (LONG j=0; j<N; j++){
for (LONG i=0; i<N; i++){
dA[j*N+i] = 2.f*(j+i);
dB[j*N+i] = 1.f*(j-i);
}
}
gettimeofday(&t1,0);
gpuMM_um<<<grid_um,threadBlock_um>>>(dA,dB,dC,N);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
gettimeofday(&t2,0);
timersub(&t2,&t1,&tp);
tt = (double) tp.tv_sec + ((double) tp.tv_usec/1.0e6);
gflops = ( 1.0e-9 * 2.0 * N * N * N ) / tt;
cout << "Managed : " << gflops << endl;
CUDA_SAFE_CALL(cudaFree(dA));
CUDA_SAFE_CALL(cudaFree(dB));
CUDA_SAFE_CALL(cudaFree(dC));
delete [] hA;
delete [] hB;
delete [] hC;
delete [] C;
cout << "Finished." << endl;
return 0;
}
|
2a126a32349c6d28a10dfe9a3aed119b4c868736.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel2_y [4][2];
static int dims_advec_mom_kernel2_y_h [4][2] = {0};
//user function
__device__
inline void advec_mom_kernel2_y_gpu(ACC<double> &vel1,
const ACC<double> &node_mass_post,
const ACC<double> &node_mass_pre,
const ACC<double> &mom_flux) {
vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) +
mom_flux(0,-1,0) - mom_flux(0,0,0) ) / node_mass_post(0,0,0);
}
__global__ void ops_advec_mom_kernel2_y(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[0][0] * dims_advec_mom_kernel2_y[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[1][0] * dims_advec_mom_kernel2_y[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[2][0] * dims_advec_mom_kernel2_y[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[3][0] * dims_advec_mom_kernel2_y[3][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel2_y[0][0], dims_advec_mom_kernel2_y[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel2_y[1][0], dims_advec_mom_kernel2_y[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel2_y[2][0], dims_advec_mom_kernel2_y[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel2_y[3][0], dims_advec_mom_kernel2_y[3][1], arg3);
advec_mom_kernel2_y_gpu(argp0, argp1, argp2, argp3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_advec_mom_kernel2_y_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,133)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(133,"advec_mom_kernel2_y");
OPS_kernels[133].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != dims_advec_mom_kernel2_y_h[0][0] || ydim0 != dims_advec_mom_kernel2_y_h[0][1] || xdim1 != dims_advec_mom_kernel2_y_h[1][0] || ydim1 != dims_advec_mom_kernel2_y_h[1][1] || xdim2 != dims_advec_mom_kernel2_y_h[2][0] || ydim2 != dims_advec_mom_kernel2_y_h[2][1] || xdim3 != dims_advec_mom_kernel2_y_h[3][0] || ydim3 != dims_advec_mom_kernel2_y_h[3][1]) {
dims_advec_mom_kernel2_y_h[0][0] = xdim0;
dims_advec_mom_kernel2_y_h[0][1] = ydim0;
dims_advec_mom_kernel2_y_h[1][0] = xdim1;
dims_advec_mom_kernel2_y_h[1][1] = ydim1;
dims_advec_mom_kernel2_y_h[2][0] = xdim2;
dims_advec_mom_kernel2_y_h[2][1] = ydim2;
dims_advec_mom_kernel2_y_h[3][0] = xdim3;
dims_advec_mom_kernel2_y_h[3][1] = ydim3;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel2_y, dims_advec_mom_kernel2_y_h, sizeof(dims_advec_mom_kernel2_y)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[133].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel2_y), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[133].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[133].mpi_time += t2-t1;
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 133;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 133;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_advec_mom_kernel2_y_execute;
if (OPS_diags > 1) {
ops_timing_realloc(133,"advec_mom_kernel2_y");
}
ops_enqueue_kernel(desc);
}
#endif
| 2a126a32349c6d28a10dfe9a3aed119b4c868736.cu | //
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel2_y [4][2];
static int dims_advec_mom_kernel2_y_h [4][2] = {0};
//user function
__device__
inline void advec_mom_kernel2_y_gpu(ACC<double> &vel1,
const ACC<double> &node_mass_post,
const ACC<double> &node_mass_pre,
const ACC<double> &mom_flux) {
vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) +
mom_flux(0,-1,0) - mom_flux(0,0,0) ) / node_mass_post(0,0,0);
}
__global__ void ops_advec_mom_kernel2_y(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[0][0] * dims_advec_mom_kernel2_y[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[1][0] * dims_advec_mom_kernel2_y[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[2][0] * dims_advec_mom_kernel2_y[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_y[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_y[3][0] * dims_advec_mom_kernel2_y[3][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel2_y[0][0], dims_advec_mom_kernel2_y[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel2_y[1][0], dims_advec_mom_kernel2_y[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel2_y[2][0], dims_advec_mom_kernel2_y[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel2_y[3][0], dims_advec_mom_kernel2_y[3][1], arg3);
advec_mom_kernel2_y_gpu(argp0, argp1, argp2, argp3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_advec_mom_kernel2_y_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,133)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(133,"advec_mom_kernel2_y");
OPS_kernels[133].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != dims_advec_mom_kernel2_y_h[0][0] || ydim0 != dims_advec_mom_kernel2_y_h[0][1] || xdim1 != dims_advec_mom_kernel2_y_h[1][0] || ydim1 != dims_advec_mom_kernel2_y_h[1][1] || xdim2 != dims_advec_mom_kernel2_y_h[2][0] || ydim2 != dims_advec_mom_kernel2_y_h[2][1] || xdim3 != dims_advec_mom_kernel2_y_h[3][0] || ydim3 != dims_advec_mom_kernel2_y_h[3][1]) {
dims_advec_mom_kernel2_y_h[0][0] = xdim0;
dims_advec_mom_kernel2_y_h[0][1] = ydim0;
dims_advec_mom_kernel2_y_h[1][0] = xdim1;
dims_advec_mom_kernel2_y_h[1][1] = ydim1;
dims_advec_mom_kernel2_y_h[2][0] = xdim2;
dims_advec_mom_kernel2_y_h[2][1] = ydim2;
dims_advec_mom_kernel2_y_h[3][0] = xdim3;
dims_advec_mom_kernel2_y_h[3][1] = ydim3;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel2_y, dims_advec_mom_kernel2_y_h, sizeof(dims_advec_mom_kernel2_y)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[133].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel2_y<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[133].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[133].mpi_time += t2-t1;
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[133].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel2_y(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 133;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 133;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_advec_mom_kernel2_y_execute;
if (OPS_diags > 1) {
ops_timing_realloc(133,"advec_mom_kernel2_y");
}
ops_enqueue_kernel(desc);
}
#endif
|
3831e5ce09a319824dabf3c999cd771024a0d6b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "newtimer.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void bodyForce(float4 *p, float4 *v, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
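// Shared-memory tiling: each block stages BLOCK_SIZE body positions in spos,
// every thread accumulates the force those bodies exert on its own body, and
// the loop advances tile by tile until all n bodies have been visited.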
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float3 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z);
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float dz = spos[j].z - p[i].z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
}
__syncthreads();
}
v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
}
}
int main(const int argc, const char** argv) {
StartTimer();
int nBodies = 100000;
int nIters = 20;
if (argc > 1) nBodies = atoi(argv[1]);
if (argc > 2) nIters = atoi(argv[2]);
const float dt = 0.01f; // time step
int bytes = 2*nBodies*sizeof(float4);
float *buf = (float*)malloc(bytes);
BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };
randomizeBodies(buf, 8*nBodies); // Init pos / vel data
float *d_buf;
hipMalloc(&d_buf, bytes);
BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };
int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE;
double tStartLoop = 0.0;
double tEndLoop = 0.0;
double loopTime = 0.0;
for (int iter = 1; iter <= nIters; iter++) {
printf("iteration:%d\n", iter);
tStartLoop = GetTimer() / 1000.0;
hipMemcpy(d_buf, buf, bytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( bodyForce), dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, d_p.pos, d_p.vel, dt, nBodies);
hipMemcpy(buf, d_buf, bytes, hipMemcpyDeviceToHost);
tEndLoop = GetTimer() / 1000.0;
loopTime += tEndLoop - tStartLoop;
for (int i = 0 ; i < nBodies; i++) { // integrate position
p.pos[i].x += p.vel[i].x*dt;
p.pos[i].y += p.vel[i].y*dt;
p.pos[i].z += p.vel[i].z*dt;
}
}
free(buf);
hipFree(d_buf);
const double tEndTime = GetTimer() / 1000.0;
printf("percent of time in bodyForce: %f \n", loopTime/tEndTime);
}
| 3831e5ce09a319824dabf3c999cd771024a0d6b2.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "newtimer.h"
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void bodyForce(float4 *p, float4 *v, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
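// Shared-memory tiling: each block stages BLOCK_SIZE body positions in spos,
// every thread accumulates the force those bodies exert on its own body, and
// the loop advances tile by tile until all n bodies have been visited.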
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float3 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z);
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float dz = spos[j].z - p[i].z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3;
}
__syncthreads();
}
v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz;
}
}
int main(const int argc, const char** argv) {
StartTimer();
int nBodies = 100000;
int nIters = 20;
if (argc > 1) nBodies = atoi(argv[1]);
if (argc > 2) nIters = atoi(argv[2]);
const float dt = 0.01f; // time step
int bytes = 2*nBodies*sizeof(float4);
float *buf = (float*)malloc(bytes);
BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies };
randomizeBodies(buf, 8*nBodies); // Init pos / vel data
float *d_buf;
cudaMalloc(&d_buf, bytes);
BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies };
int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE;
double tStartLoop = 0.0;
double tEndLoop = 0.0;
double loopTime = 0.0;
for (int iter = 1; iter <= nIters; iter++) {
printf("iteration:%d\n", iter);
tStartLoop = GetTimer() / 1000.0;
cudaMemcpy(d_buf, buf, bytes, cudaMemcpyHostToDevice);
bodyForce<<<nBlocks, BLOCK_SIZE>>>(d_p.pos, d_p.vel, dt, nBodies);
cudaMemcpy(buf, d_buf, bytes, cudaMemcpyDeviceToHost);
tEndLoop = GetTimer() / 1000.0;
loopTime += tEndLoop - tStartLoop;
for (int i = 0 ; i < nBodies; i++) { // integrate position
p.pos[i].x += p.vel[i].x*dt;
p.pos[i].y += p.vel[i].y*dt;
p.pos[i].z += p.vel[i].z*dt;
}
}
free(buf);
cudaFree(d_buf);
const double tEndTime = GetTimer() / 1000.0;
printf("percent of time in bodyForce: %f \n", loopTime/tEndTime);
}
|
074091201b5c250489dd6d4f3e69b75c9e862bc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stencil_runtime.h"
#include "cu_util.h"
#include "common.h"
#include "cpu_util.h"
#include "macro.h"
#include "compute_cpu.h"
#include <vector>
#include "data_util.h"
#include <mpi.h>
#include <iostream>
#include "time_util.h"
#include "array.h"
#include "buffer.h"
#include "compute_cuda.cu"
#include "CU_DS.h"
#include <stdio.h>
//#include "mpi_util.h"
using namespace std;
StencilRuntime::StencilRuntime(int num_dims, int unit_size,
const IndexArray &global_size,
int proc_num_dims,
IntArray proc_size,
int stencil_width, int num_iters):num_dims_(num_dims),
unit_size_(unit_size), global_size_(global_size),
proc_num_dims_(proc_num_dims), proc_size_(proc_size),
stencil_width_(stencil_width), num_iters_(num_iters),
current_iter_(0)
{
}
void StencilRuntime::StencilInit()
{
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank_);
this->num_gpus_ = GetGPUNumber();
this->num_devices_ = num_gpus_ + 1;
gv_ = new Grid_view(this->num_dims_, this->global_size_, this->proc_num_dims_, this->proc_size_, this->my_rank_);
grid_ = gv_->CreateGrid(unit_size_, num_dims_, global_size_, stencil_width_);
//allocate gpu grids pointers
gv_cuda_ = (Grid_view_cuda **)malloc(sizeof(Grid_view_cuda *) * num_gpus_);
grid_cuda_ = (GridCuda **)malloc(sizeof(GridCuda *) * num_gpus_);
internal_tiles = (vector <struct Tile> *)malloc(num_devices_ * sizeof(vector <struct Tile>));
border_tiles = (vector <struct Tile> *)malloc(num_devices_ * sizeof(vector <struct Tile>));
requests = (vector <MPI_Request> *)malloc(num_dims_ * sizeof(vector <MPI_Request>));
speeds_ = (double *)malloc(sizeof(double) * num_devices_);
//initial speeds are equal
for(int i = 0; i < num_devices_; i++)
{
speeds_[i] = 1;
}
stencil_idx_ = 0;
cout<<"Rank: "<<my_rank_<<" Init done..."<<endl;
}
void StencilRuntime::along_dim()
{
int along_which = num_dims_ - 1;
//IndexArray size = grid_->my_size();
//for(int i = num_dims_ - 2; i >= 0; i--)
//{
// if(size[i] > size[along_which])
// {
// along_which = i;
// }
//}
this->along_ = along_which;
}
//split the per-node grid across the devices in proportion to their speeds (equal on the first pass)
void StencilRuntime::split()
{
IndexArray size = grid_->my_size();
int along_size = size[along_];
starts_ = (int *)malloc(num_devices_ * sizeof(int));
along_partitions_ = (int *)malloc(num_devices_ * sizeof(int));
int accumulated = 0;
double total_speed = 0;
for(int i = 0; i < num_devices_; i++)
{
total_speed += speeds_[i];
}
for(int i = 0; i <= num_devices_ - 2; i++)
{
along_partitions_[i] = along_size*speeds_[i]/total_speed;
cout<<"#########along "<<i<<": "<<along_partitions_[i]<<endl;
starts_[i] = accumulated;
accumulated += along_partitions_[i];
}
along_partitions_[num_devices_ - 1] = along_size - accumulated;
cout<<"#########along "<<(num_devices_ - 1)<<": "<<along_partitions_[num_devices_ - 1]<<endl;
starts_[num_devices_ - 1] = accumulated;
}
//allocate grid views and grids
//also copies data into sub-grids
void StencilRuntime::create_grids()
{
IndexArray offset(stencil_width_, stencil_width_, stencil_width_);
IndexArray size = grid_->my_size();
size[along_] = along_partitions_[0];
//allocate cpu grids
gv_cpu_ = new Grid_view_cpu();
grid_cpu_ = gv_cpu_->CreateGrid(unit_size_, num_dims_, offset, size, stencil_width_, num_devices_, 0);
IndexArray my_offset(stencil_width_, stencil_width_, stencil_width_);
grid_cpu_->copy_host_to_host(my_offset, offset, grid_->buffer(), size);
cout<<"################copy to cpu done..."<<endl;
for(int i = 0; i <= num_gpus_ - 1; i++)
{
cout<<"copying GPU: "<<i<<endl;
//don't forget to set device
CUDA_SAFE_CALL(hipSetDevice(i));
if(current_iter_ == 0)
{
CUDA_SAFE_CALL(hipSetDeviceFlags(hipDeviceMapHost));
CUDA_SAFE_CALL(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
}
offset[along_] += along_partitions_[i];
size[along_] = along_partitions_[i+1];
//cout<<"gpu "<<i<<" size: "<<size<<endl;
gv_cuda_[i] = new Grid_view_cuda();
grid_cuda_[i] = gv_cuda_[i]->CreateGrid(unit_size_, num_dims_, offset, size, stencil_width_, num_devices_, i + 1);
grid_cuda_[i]->copy_host_to_device(my_offset, offset, size, grid_->buffer());
}
}
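//pthread entry point for the host-side work: for the CPU "device" it spawns
//CPU_THREADS workers over the internal or border tile list and, for internal
//tiles, updates the measured CPU speed used by the next split()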
void *StencilRuntime::launch(void *arg)
{
int *compute_type = (int *)((void **)arg)[0];
int *ptr_type = (int *)((void **)arg)[1];
void *object = ((void **)arg)[2];
int device_type = *ptr_type;
StencilRuntime *runtime = (StencilRuntime *)object;
if(device_type == CPU)
{
pthread_t tid[CPU_THREADS];
Cargs args[CPU_THREADS];
double before_launch = rtclock();
for(int i = 0; i < CPU_THREADS; i++)
{
args[i].tid = i;
args[i].runtime = runtime;
if(*compute_type == INTERNAL)
{
args[i].tiles = &(runtime->internal_tiles)[0];
}
else
{
args[i].tiles = &(runtime->border_tiles)[0];
}
pthread_create(&tid[i], NULL, compute_tiles, &args[i]);
}
for(int j = 0; j < CPU_THREADS; j++)
{
pthread_join(tid[j], NULL);
}
double after_launch = rtclock();
printf("exec time: %f\n", after_launch - before_launch);
if(*compute_type == INTERNAL)
runtime->speeds_[0] = 1/(after_launch - before_launch);
}
return (void *)0;
}
void StencilRuntime::tile_grids()
{
//tile CPU internal
IndexArray internal_start;
IndexArray internal_size;
IndexArray my_size;
IndexArray my_real_size;
for(int i = 0; i < num_dims_; i++)
{
internal_start[i] += 2*stencil_width_;
internal_size[i] = grid_cpu_->my_size()[i] - 2*stencil_width_;
}
internal_tiles[0].clear();
tiling(num_dims_, internal_size, internal_start, CPU_TILE_SIZE, internal_tiles[0]);
cout<<"############internal size: "<<internal_size<<endl;
//cout<<"tiling done........"<<endl;
//for(int i = 0; i < internal_tiles[0].size(); i++)
//{
// cout<<"offset: "<<(internal_tiles[0])[i].offset<<"size: "<<(internal_tiles[0])[i].size<<endl;
//}
//tile CPU border
my_size = grid_cpu_->my_size();
my_real_size = grid_cpu_->my_real_size();
border_tiles[0].clear();
tiling_border(num_dims_, my_size, my_real_size, grid_cpu_->halo(), CPU_TILE_SIZE, border_tiles[0]);
//for(int i = 0; i < border_tiles[0].size(); i++)
//{
// cout<<"offset: "<<( border_tiles[0])[i].offset<<"size: "<<( border_tiles[0])[i].size<<endl;
//}
//printf("CPU internal tiles: %d, CPU border tiles: %d\n", internal_tiles[0].size(), border_tiles[0].size());
//tile GPUs internal
for(int j = 0; j < num_gpus_; j++)
{
IndexArray internal_start;
IndexArray internal_size;
for(int i = 0; i <num_dims_; i++)
{
internal_start[i] += 2*stencil_width_;
internal_size[i] = grid_cuda(j)->my_size()[i] - 2*stencil_width();
}
internal_tiles[j + 1].clear();
tiling(num_dims(), internal_size, internal_start, GPU_TILE_SIZE, internal_tiles[j + 1]);
my_size = grid_cuda_[j]->my_size();
my_real_size = grid_cuda_[j]->my_real_size();
//tile gpu border
border_tiles[j + 1].clear();
tiling_border(num_dims_, my_size, my_real_size, grid_cuda_[j]->halo(), GPU_TILE_SIZE, border_tiles[j + 1]);
}
//for(int i = 0; i < internal_tiles[2].size(); i++)
//{
// cout<<"my size: "<<grid_cuda_[0]->my_size()<<"offset: "<<( internal_tiles[1])[i].offset<<"size: "<<( internal_tiles[1])[i].size<<endl;
//}
}
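//exchange halos between the CPU sub-grid, the GPU sub-grids and the per-node
//global grid; the asynchronous MPI boundary exchange is overlapped with
//process_internal() before the received halos are copied back to each device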
void StencilRuntime::interdevice_exchange()
{
//inter device exchange
for(int i = 0; i < num_gpus_; i++)
{
int cur_device;
hipGetDevice(&cur_device);
if(i!=cur_device)
CUDA_SAFE_CALL(hipSetDevice(i));
//the highest dimension
grid_cuda_[i]->send_to_neighbors(along_, stencil_width_, grid_, grid_cpu_, grid_cuda_);
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim]>1)
{
//forward
grid_cuda_[i]->copy_device_to_map(dim, stencil_width_, true);
//backward
grid_cuda_[i]->copy_device_to_map(dim, stencil_width_, false);
}
}
}
grid_cpu_->send_to_neighbors(along_, stencil_width_, grid_, grid_cuda_);
//next, synchronize the previous operations
for(int i = 0; i < num_gpus_; i++)
{
CUDA_SAFE_CALL(hipSetDevice(i));
hipDeviceSynchronize();
}
//conduct synchronous operations
for(int i = 0; i < num_gpus_; i++)
{
grid_cuda_[i]->copy_map_to_neighbor(along_, stencil_width_, grid_, grid_cpu_);
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
//copy side halos to global grid
if(proc_size_[dim]>1)
{
//forward
grid_cuda_[i]->copy_map_to_global_grid(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, true);
//backward
grid_cuda_[i]->copy_map_to_global_grid(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, false);
}
}
}
//copy side to global
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim] > 1)
{
//forward
grid_cpu_->copy_to_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, true);
//backward
grid_cpu_->copy_to_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, false);
}
}
//exchange among nodes
//double before_exchange = rtclock();
//UnsignedArray halo(stencil_width_, stencil_width_, stencil_width_);
//Width2 w = {halo, halo};
//gv_->ExchangeBoundaries(grid_, w, false, false);
//MPI_Barrier(MPI_COMM_WORLD);
double before_ex = rtclock();
for(int i = grid_->num_dims() - 1; i >= 0; i--)
{
requests[i].clear();
gv_->ExchangeBoundariesAsync(grid_, i, stencil_width_, stencil_width_, false, false, requests[i]);
}
double before_internal = rtclock();
process_internal();
double after_internal = rtclock();
printf("PURE INTERNAL TIME: %f\n", after_internal - before_internal);
double copy_time = 0;
for(int i = grid_->num_dims() - 1; i >= 0; i--)
{
for (int j = 0; j < requests[i].size(); j++)
{
MPI_Request *req = &(requests[i][j]);
CHECK_MPI(MPI_Wait(req, MPI_STATUS_IGNORE));
double before_copy = rtclock();
grid_->CopyinHalo(i, stencil_width_, false);
grid_->CopyinHalo(i, stencil_width_, true);
double after_copy = rtclock();
copy_time += (after_copy - before_copy);
}
printf("DIM %d copy time: %f\n", i, copy_time);
}
double after_ex = rtclock();
printf("RANK: %d PURE EX TIME: %f, PURE COPY TIME: %f\n", my_rank_, after_ex - before_ex, copy_time);
MPI_Barrier(MPI_COMM_WORLD);
//double after_exchange = rtclock();
//printf("exchange time: %f\n", after_exchange - before_exchange);
//next step is to copy self halo buffer to sub devices
//synchronous operations first
//copy from global grid receive buffer to cpu local halo
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim] > 1)
{
//forward
grid_cpu_->copy_from_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, true);
//backward
grid_cpu_->copy_from_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, false);
}
}
for(int i = 0; i < num_gpus_; i++)
{
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim]>1)
{
//copy from global receive buffer to local mapped buffer
//forward
grid_cuda_[i]->copy_global_grid_to_map(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, true);
//backward
grid_cuda_[i]->copy_global_grid_to_map(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, false);
}
}
}
//asynchronous operations next
for(int i = 0; i < num_gpus_; i++)
{
CUDA_SAFE_CALL(hipSetDevice(i));
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim]>1)
{
//copy from map buffer to gpu device halo
//forward
grid_cuda_[i]->copy_map_to_device(dim, stencil_width_, true);
//backward
grid_cuda_[i]->copy_map_to_device(dim, stencil_width_, false);
}
}
}
//next, synchronize the previous operations
for(int i = 0; i < num_gpus_; i++)
{
CUDA_SAFE_CALL(hipSetDevice(i));
hipDeviceSynchronize();
}
//copy top from global to gpu
grid_cuda_[num_gpus_ - 1]->copy_from_top(along_, stencil_width_, grid_);
//copy bottom from global to cpu
grid_cpu_->copy_from_bottom(along_, stencil_width_, grid_);
}
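//compute internal tiles: one pthread drives the CPU partition while each GPU
//launches compute_cuda_internal on its own tile list; the measured GPU time
//updates speeds_ for the next repartitioning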
void StencilRuntime::process_internal()
{
pthread_t tid[2];
int device_types[2];
device_types[0] = CPU;
device_types[1] = GPU;
int compute_type = INTERNAL;
void *arg1[3] = {&compute_type, &device_types[0], this};
//launch CPU
pthread_create(&tid[0], NULL, launch, arg1);
cout<<"CPU thread launched..."<<endl;
double before_gpu = rtclock();
for(int j = 0; j < num_gpus_; j++)
{
Ltile *tiles_d;
int *dims_d;
CUDA_SAFE_CALL(hipSetDevice(j));
hipMalloc((void **)&tiles_d, sizeof(Ltile) * internal_tiles[j + 1].size());
hipMalloc((void **)&dims_d, sizeof(int) * 3);
hipMemcpy(tiles_d, (Ltile *)&(internal_tiles[j + 1])[0], sizeof(Ltile) * internal_tiles[j + 1].size(), hipMemcpyHostToDevice);
hipMemcpy(dims_d, &(grid_cuda(j)->my_real_size())[0], sizeof(int)*3, hipMemcpyHostToDevice);
dim3 grid(GPU_BLOCKS, 1, 1);
dim3 block(GPU_THREADS, 1, 1);
hipLaunchKernelGGL(( compute_cuda_internal), dim3(grid), dim3(block), 0, 0,
internal_tiles[j + 1].size(),
tiles_d,
grid_cuda(j)->data_in(),
grid_cuda(j)->data_out(),
num_dims(),
unit_size(),
dims_d,
stencil_idx_
);
}
hipDeviceSynchronize();
checkCUDAError("~~~internal error checking...");
double after_gpu = rtclock();
for(int i = 0; i < num_gpus_; i++)
{
speeds_[i + 1] = 1/(after_gpu - before_gpu);
}
printf("gpu time: %f\n", after_gpu - before_gpu);
pthread_join(tid[0], NULL);
}
void StencilRuntime::process_border()
{
printf("processing border...\n");
pthread_t tid[2];
int device_types[2];
device_types[0] = CPU;
device_types[1] = GPU;
int compute_type = BORDER;
void *arg1[3] = {&compute_type , &device_types[0], this};
//launch CPU
pthread_create(&tid[0], NULL, launch, arg1);
cout<<"CPU thread launched..."<<endl;
double before_gpu = rtclock();
for(int j = 0; j < num_gpus_; j++)
{
Ltile *tiles_d;
int *dims_d;
CUDA_SAFE_CALL(hipSetDevice(j));
hipMalloc((void **)&tiles_d, sizeof(Ltile) * border_tiles[j + 1].size());
hipMalloc((void **)&dims_d, sizeof(int) * 3);
hipMemcpy(tiles_d, (Ltile *)&(border_tiles[j + 1])[0], sizeof(Ltile) * border_tiles[j + 1].size(), hipMemcpyHostToDevice);
hipMemcpy(dims_d, &(grid_cuda(j)->my_real_size())[0], sizeof(int)*3, hipMemcpyHostToDevice);
//printf("SIZE OF INDEXARRAY: %d\n", sizeof(IndexArray));
dim3 grid(GPU_BLOCKS, 1, 1);
dim3 block(GPU_THREADS, 1, 1);
hipLaunchKernelGGL(( compute_cuda_internal), dim3(grid), dim3(block), 0, 0,
border_tiles[j + 1].size(),
tiles_d,
grid_cuda(j)->data_in(),
grid_cuda(j)->data_out(),
num_dims(),
unit_size(),
dims_d,
stencil_idx_
);
}
hipDeviceSynchronize();
checkCUDAError("~~~border error checking...");
double after_gpu = rtclock();
printf("gpu time: %f\n", after_gpu - before_gpu);
pthread_join(tid[0], NULL);
}
void StencilRuntime::StencilBegin()
{
if(num_iters_>1)
{
profile_iter();
clean_grids();
current_iter_ = 1;
}
along_dim();
split();
create_grids();
tile_grids();
interdevice_exchange();
process_border();
current_iter_++;
double before = rtclock();
for(; current_iter_ <= num_iters_ - 1; current_iter_++)
{
printf("%d ====>ITER: %d\n", my_rank_, current_iter_);
double before_internal = rtclock();
interdevice_exchange();
double after_internal = rtclock();
printf("internal time: %f\n", after_internal - before_internal);
process_border();
double after_border = rtclock();
printf("border time: %f\n", after_border - after_internal);
}
double after = rtclock();
printf("#########RANK %d EXECUTION TIME: %f\n", my_rank_, after - before);
}
void StencilRuntime::profile_iter()
{
along_dim();
split();
create_grids();
tile_grids();
interdevice_exchange();
process_border();
}
void StencilRuntime::clean_grids()
{
delete gv_cpu_;
gv_cpu_ = NULL;
delete grid_cpu_;
grid_cpu_ = NULL;
for(int i = 0; i < num_gpus_; i++)
{
delete grid_cuda_[i];
grid_cuda_[i] = NULL;
delete gv_cuda_[i];
gv_cuda_[i] = NULL;
}
}
void StencilRuntime::StencilFinalize()
{
delete gv_;
delete grid_;
delete grid_cpu_;
delete gv_cpu_;
for(int i = 0; i < num_gpus_; i++)
{
delete grid_cuda_[i];
delete gv_cuda_[i];
}
free(grid_cuda_);
free(gv_cuda_);
free(internal_tiles);
free(border_tiles);
free(requests);
//MPI_Finalize();
}
| 074091201b5c250489dd6d4f3e69b75c9e862bc7.cu | #include "stencil_runtime.h"
#include "cu_util.h"
#include "common.h"
#include "cpu_util.h"
#include "macro.h"
#include "compute_cpu.h"
#include <vector>
#include "data_util.h"
#include <mpi.h>
#include <iostream>
#include "time_util.h"
#include "array.h"
#include "buffer.h"
#include "compute_cuda.cu"
#include "CU_DS.h"
#include <stdio.h>
//#include "mpi_util.h"
using namespace std;
StencilRuntime::StencilRuntime(int num_dims, int unit_size,
const IndexArray &global_size,
int proc_num_dims,
IntArray proc_size,
int stencil_width, int num_iters):num_dims_(num_dims),
unit_size_(unit_size), global_size_(global_size),
proc_num_dims_(proc_num_dims), proc_size_(proc_size),
stencil_width_(stencil_width), num_iters_(num_iters),
current_iter_(0)
{
}
void StencilRuntime::StencilInit()
{
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank_);
this->num_gpus_ = GetGPUNumber();
this->num_devices_ = num_gpus_ + 1;
gv_ = new Grid_view(this->num_dims_, this->global_size_, this->proc_num_dims_, this->proc_size_, this->my_rank_);
grid_ = gv_->CreateGrid(unit_size_, num_dims_, global_size_, stencil_width_);
//allocate gpu grids pointers
gv_cuda_ = (Grid_view_cuda **)malloc(sizeof(Grid_view_cuda *) * num_gpus_);
grid_cuda_ = (GridCuda **)malloc(sizeof(GridCuda *) * num_gpus_);
internal_tiles = (vector <struct Tile> *)malloc(num_devices_ * sizeof(vector <struct Tile>));
border_tiles = (vector <struct Tile> *)malloc(num_devices_ * sizeof(vector <struct Tile>));
requests = (vector <MPI_Request> *)malloc(num_dims_ * sizeof(vector <MPI_Request>));
speeds_ = (double *)malloc(sizeof(double) * num_devices_);
//initial speeds are equal
for(int i = 0; i < num_devices_; i++)
{
speeds_[i] = 1;
}
stencil_idx_ = 0;
cout<<"Rank: "<<my_rank_<<" Init done..."<<endl;
}
void StencilRuntime::along_dim()
{
int along_which = num_dims_ - 1;
//IndexArray size = grid_->my_size();
//for(int i = num_dims_ - 2; i >= 0; i--)
//{
// if(size[i] > size[along_which])
// {
// along_which = i;
// }
//}
this->along_ = along_which;
}
//split the per-node grid with initial parameters(evenly)
void StencilRuntime::split()
{
IndexArray size = grid_->my_size();
int along_size = size[along_];
starts_ = (int *)malloc(num_devices_ * sizeof(int));
along_partitions_ = (int *)malloc(num_devices_ * sizeof(int));
int accumulated = 0;
double total_speed = 0;
for(int i = 0; i < num_devices_; i++)
{
total_speed += speeds_[i];
}
for(int i = 0; i <= num_devices_ - 2; i++)
{
along_partitions_[i] = along_size*speeds_[i]/total_speed;
cout<<"#########along "<<i<<": "<<along_partitions_[i]<<endl;
starts_[i] = accumulated;
accumulated += along_partitions_[i];
}
along_partitions_[num_devices_ - 1] = along_size - accumulated;
cout<<"#########along "<<(num_devices_ - 1)<<": "<<along_partitions_[num_devices_ - 1]<<endl;
starts_[num_devices_ - 1] = accumulated;
}
//allocate grid views and grids
//also copies data into sub-grids
void StencilRuntime::create_grids()
{
IndexArray offset(stencil_width_, stencil_width_, stencil_width_);
IndexArray size = grid_->my_size();
size[along_] = along_partitions_[0];
//allocate cpu grids
gv_cpu_ = new Grid_view_cpu();
grid_cpu_ = gv_cpu_->CreateGrid(unit_size_, num_dims_, offset, size, stencil_width_, num_devices_, 0);
IndexArray my_offset(stencil_width_, stencil_width_, stencil_width_);
grid_cpu_->copy_host_to_host(my_offset, offset, grid_->buffer(), size);
cout<<"################copy to cpu done..."<<endl;
for(int i = 0; i <= num_gpus_ - 1; i++)
{
cout<<"copying GPU: "<<i<<endl;
//don't forget to set device
CUDA_SAFE_CALL(cudaSetDevice(i));
if(current_iter_ == 0)
{
CUDA_SAFE_CALL(cudaSetDeviceFlags(cudaDeviceMapHost));
CUDA_SAFE_CALL(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
}
offset[along_] += along_partitions_[i];
size[along_] = along_partitions_[i+1];
//cout<<"gpu "<<i<<" size: "<<size<<endl;
gv_cuda_[i] = new Grid_view_cuda();
grid_cuda_[i] = gv_cuda_[i]->CreateGrid(unit_size_, num_dims_, offset, size, stencil_width_, num_devices_, i + 1);
grid_cuda_[i]->copy_host_to_device(my_offset, offset, size, grid_->buffer());
}
}
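//pthread entry point for the host-side work: for the CPU "device" it spawns
//CPU_THREADS workers over the internal or border tile list and, for internal
//tiles, updates the measured CPU speed used by the next split()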
void *StencilRuntime::launch(void *arg)
{
int *compute_type = (int *)((void **)arg)[0];
int *ptr_type = (int *)((void **)arg)[1];
void *object = ((void **)arg)[2];
int device_type = *ptr_type;
StencilRuntime *runtime = (StencilRuntime *)object;
if(device_type == CPU)
{
pthread_t tid[CPU_THREADS];
Cargs args[CPU_THREADS];
double before_launch = rtclock();
for(int i = 0; i < CPU_THREADS; i++)
{
args[i].tid = i;
args[i].runtime = runtime;
if(*compute_type == INTERNAL)
{
args[i].tiles = &(runtime->internal_tiles)[0];
}
else
{
args[i].tiles = &(runtime->border_tiles)[0];
}
pthread_create(&tid[i], NULL, compute_tiles, &args[i]);
}
for(int j = 0; j < CPU_THREADS; j++)
{
pthread_join(tid[j], NULL);
}
double after_launch = rtclock();
printf("exec time: %f\n", after_launch - before_launch);
if(*compute_type == INTERNAL)
runtime->speeds_[0] = 1/(after_launch - before_launch);
}
return (void *)0;
}
void StencilRuntime::tile_grids()
{
//tile CPU internal
IndexArray internal_start;
IndexArray internal_size;
IndexArray my_size;
IndexArray my_real_size;
for(int i = 0; i < num_dims_; i++)
{
internal_start[i] += 2*stencil_width_;
internal_size[i] = grid_cpu_->my_size()[i] - 2*stencil_width_;
}
internal_tiles[0].clear();
tiling(num_dims_, internal_size, internal_start, CPU_TILE_SIZE, internal_tiles[0]);
cout<<"############internal size: "<<internal_size<<endl;
//cout<<"tiling done........"<<endl;
//for(int i = 0; i < internal_tiles[0].size(); i++)
//{
// cout<<"offset: "<<(internal_tiles[0])[i].offset<<"size: "<<(internal_tiles[0])[i].size<<endl;
//}
//tile CPU border
my_size = grid_cpu_->my_size();
my_real_size = grid_cpu_->my_real_size();
border_tiles[0].clear();
tiling_border(num_dims_, my_size, my_real_size, grid_cpu_->halo(), CPU_TILE_SIZE, border_tiles[0]);
//for(int i = 0; i < border_tiles[0].size(); i++)
//{
// cout<<"offset: "<<( border_tiles[0])[i].offset<<"size: "<<( border_tiles[0])[i].size<<endl;
//}
//printf("CPU internal tiles: %d, CPU border tiles: %d\n", internal_tiles[0].size(), border_tiles[0].size());
//tile GPUs internal
for(int j = 0; j < num_gpus_; j++)
{
IndexArray internal_start;
IndexArray internal_size;
for(int i = 0; i <num_dims_; i++)
{
internal_start[i] += 2*stencil_width_;
internal_size[i] = grid_cuda(j)->my_size()[i] - 2*stencil_width();
}
internal_tiles[j + 1].clear();
tiling(num_dims(), internal_size, internal_start, GPU_TILE_SIZE, internal_tiles[j + 1]);
my_size = grid_cuda_[j]->my_size();
my_real_size = grid_cuda_[j]->my_real_size();
//tile gpu border
border_tiles[j + 1].clear();
tiling_border(num_dims_, my_size, my_real_size, grid_cuda_[j]->halo(), GPU_TILE_SIZE, border_tiles[j + 1]);
}
//for(int i = 0; i < internal_tiles[2].size(); i++)
//{
// cout<<"my size: "<<grid_cuda_[0]->my_size()<<"offset: "<<( internal_tiles[1])[i].offset<<"size: "<<( internal_tiles[1])[i].size<<endl;
//}
}
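//exchange halos between the CPU sub-grid, the GPU sub-grids and the per-node
//global grid; the asynchronous MPI boundary exchange is overlapped with
//process_internal() before the received halos are copied back to each device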
void StencilRuntime::interdevice_exchange()
{
//inter device exchange
for(int i = 0; i < num_gpus_; i++)
{
int cur_device;
cudaGetDevice(&cur_device);
if(i!=cur_device)
CUDA_SAFE_CALL(cudaSetDevice(i));
//the highest dimension
grid_cuda_[i]->send_to_neighbors(along_, stencil_width_, grid_, grid_cpu_, grid_cuda_);
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim]>1)
{
//forward
grid_cuda_[i]->copy_device_to_map(dim, stencil_width_, true);
//backward
grid_cuda_[i]->copy_device_to_map(dim, stencil_width_, false);
}
}
}
grid_cpu_->send_to_neighbors(along_, stencil_width_, grid_, grid_cuda_);
//next, synchronize the previous operations
for(int i = 0; i < num_gpus_; i++)
{
CUDA_SAFE_CALL(cudaSetDevice(i));
cudaDeviceSynchronize();
}
//conduct synchronous operations
for(int i = 0; i < num_gpus_; i++)
{
grid_cuda_[i]->copy_map_to_neighbor(along_, stencil_width_, grid_, grid_cpu_);
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
//copy side halos to global grid
if(proc_size_[dim]>1)
{
//forward
grid_cuda_[i]->copy_map_to_global_grid(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, true);
//backward
grid_cuda_[i]->copy_map_to_global_grid(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, false);
}
}
}
//copy side to global
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim] > 1)
{
//forward
grid_cpu_->copy_to_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, true);
//backward
grid_cpu_->copy_to_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, false);
}
}
//exchange among nodes
//double before_exchange = rtclock();
//UnsignedArray halo(stencil_width_, stencil_width_, stencil_width_);
//Width2 w = {halo, halo};
//gv_->ExchangeBoundaries(grid_, w, false, false);
//MPI_Barrier(MPI_COMM_WORLD);
double before_ex = rtclock();
for(int i = grid_->num_dims() - 1; i >= 0; i--)
{
requests[i].clear();
gv_->ExchangeBoundariesAsync(grid_, i, stencil_width_, stencil_width_, false, false, requests[i]);
}
double before_internal = rtclock();
process_internal();
double after_internal = rtclock();
printf("PURE INTERNAL TIME: %f\n", after_internal - before_internal);
double copy_time = 0;
for(int i = grid_->num_dims() - 1; i >= 0; i--)
{
for (int j = 0; j < requests[i].size(); j++)
{
MPI_Request *req = &(requests[i][j]);
CHECK_MPI(MPI_Wait(req, MPI_STATUS_IGNORE));
double before_copy = rtclock();
grid_->CopyinHalo(i, stencil_width_, false);
grid_->CopyinHalo(i, stencil_width_, true);
double after_copy = rtclock();
copy_time += (after_copy - before_copy);
}
printf("DIM %d copy time: %f\n", i, copy_time);
}
double after_ex = rtclock();
printf("RANK: %d PURE EX TIME: %f, PURE COPY TIME: %f\n", my_rank_, after_ex - before_ex, copy_time);
MPI_Barrier(MPI_COMM_WORLD);
//double after_exchange = rtclock();
//printf("exchange time: %f\n", after_exchange - before_exchange);
//next step is to copy self halo buffer to sub devices
//synchronous operations first
//copy from global grid receive buffer to cpu local halo
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim] > 1)
{
//forward
grid_cpu_->copy_from_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, true);
//backward
grid_cpu_->copy_from_global_grid(grid_, dim, starts_[0], along_partitions_[0], stencil_width_, false);
}
}
for(int i = 0; i < num_gpus_; i++)
{
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim]>1)
{
//copy from global receive buffer to local mapped buffer
//forward
grid_cuda_[i]->copy_global_grid_to_map(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, true);
//backward
grid_cuda_[i]->copy_global_grid_to_map(grid_, dim, starts_[i+1], along_partitions_[i+1], stencil_width_, false);
}
}
}
//asynchronous operations next
for(int i = 0; i < num_gpus_; i++)
{
CUDA_SAFE_CALL(cudaSetDevice(i));
for(int dim = num_dims_ - 2; dim >= 0; dim--)
{
if(proc_size_[dim]>1)
{
//copy from map buffer to gpu device halo
//forward
grid_cuda_[i]->copy_map_to_device(dim, stencil_width_, true);
//backward
grid_cuda_[i]->copy_map_to_device(dim, stencil_width_, false);
}
}
}
//next, synchronize the previous operations
for(int i = 0; i < num_gpus_; i++)
{
CUDA_SAFE_CALL(cudaSetDevice(i));
cudaDeviceSynchronize();
}
//copy top from global to gpu
grid_cuda_[num_gpus_ - 1]->copy_from_top(along_, stencil_width_, grid_);
//copy bottom from global to cpu
grid_cpu_->copy_from_bottom(along_, stencil_width_, grid_);
}
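//compute internal tiles: one pthread drives the CPU partition while each GPU
//launches compute_cuda_internal on its own tile list; the measured GPU time
//updates speeds_ for the next repartitioning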
void StencilRuntime::process_internal()
{
pthread_t tid[2];
int device_types[2];
device_types[0] = CPU;
device_types[1] = GPU;
int compute_type = INTERNAL;
void *arg1[3] = {&compute_type, &device_types[0], this};
//launch CPU
pthread_create(&tid[0], NULL, launch, arg1);
cout<<"CPU thread launched..."<<endl;
double before_gpu = rtclock();
for(int j = 0; j < num_gpus_; j++)
{
Ltile *tiles_d;
int *dims_d;
CUDA_SAFE_CALL(cudaSetDevice(j));
cudaMalloc((void **)&tiles_d, sizeof(Ltile) * internal_tiles[j + 1].size());
cudaMalloc((void **)&dims_d, sizeof(int) * 3);
cudaMemcpy(tiles_d, (Ltile *)&(internal_tiles[j + 1])[0], sizeof(Ltile) * internal_tiles[j + 1].size(), cudaMemcpyHostToDevice);
cudaMemcpy(dims_d, &(grid_cuda(j)->my_real_size())[0], sizeof(int)*3, cudaMemcpyHostToDevice);
dim3 grid(GPU_BLOCKS, 1, 1);
dim3 block(GPU_THREADS, 1, 1);
compute_cuda_internal<<<grid, block, 0>>>
(
internal_tiles[j + 1].size(),
tiles_d,
grid_cuda(j)->data_in(),
grid_cuda(j)->data_out(),
num_dims(),
unit_size(),
dims_d,
stencil_idx_
);
}
cudaThreadSynchronize();
checkCUDAError("~~~internal error checking...");
double after_gpu = rtclock();
for(int i = 0; i < num_gpus_; i++)
{
speeds_[i + 1] = 1/(after_gpu - before_gpu);
}
printf("gpu time: %f\n", after_gpu - before_gpu);
pthread_join(tid[0], NULL);
}
void StencilRuntime::process_border()
{
printf("processing border...\n");
pthread_t tid[2];
int device_types[2];
device_types[0] = CPU;
device_types[1] = GPU;
int compute_type = BORDER;
void *arg1[3] = {&compute_type , &device_types[0], this};
//launch CPU
pthread_create(&tid[0], NULL, launch, arg1);
cout<<"CPU thread launched..."<<endl;
double before_gpu = rtclock();
for(int j = 0; j < num_gpus_; j++)
{
Ltile *tiles_d;
int *dims_d;
CUDA_SAFE_CALL(cudaSetDevice(j));
cudaMalloc((void **)&tiles_d, sizeof(Ltile) * border_tiles[j + 1].size());
cudaMalloc((void **)&dims_d, sizeof(int) * 3);
cudaMemcpy(tiles_d, (Ltile *)&(border_tiles[j + 1])[0], sizeof(Ltile) * border_tiles[j + 1].size(), cudaMemcpyHostToDevice);
cudaMemcpy(dims_d, &(grid_cuda(j)->my_real_size())[0], sizeof(int)*3, cudaMemcpyHostToDevice);
//printf("SIZE OF INDEXARRAY: %d\n", sizeof(IndexArray));
dim3 grid(GPU_BLOCKS, 1, 1);
dim3 block(GPU_THREADS, 1, 1);
compute_cuda_internal<<<grid, block, 0>>>
(
border_tiles[j + 1].size(),
tiles_d,
grid_cuda(j)->data_in(),
grid_cuda(j)->data_out(),
num_dims(),
unit_size(),
dims_d,
stencil_idx_
);
}
cudaThreadSynchronize();
checkCUDAError("~~~border error checking...");
double after_gpu = rtclock();
printf("gpu time: %f\n", after_gpu - before_gpu);
pthread_join(tid[0], NULL);
}
void StencilRuntime::StencilBegin()
{
if(num_iters_>1)
{
profile_iter();
clean_grids();
current_iter_ = 1;
}
along_dim();
split();
create_grids();
tile_grids();
interdevice_exchange();
process_border();
current_iter_++;
double before = rtclock();
for(; current_iter_ <= num_iters_ - 1; current_iter_++)
{
printf("%d ====>ITER: %d\n", my_rank_, current_iter_);
double before_internal = rtclock();
interdevice_exchange();
double after_internal = rtclock();
printf("internal time: %f\n", after_internal - before_internal);
process_border();
double after_border = rtclock();
printf("border time: %f\n", after_border - after_internal);
}
double after = rtclock();
printf("#########RANK %d EXECUTION TIME: %f\n", my_rank_, after - before);
}
void StencilRuntime::profile_iter()
{
along_dim();
split();
create_grids();
tile_grids();
interdevice_exchange();
process_border();
}
void StencilRuntime::clean_grids()
{
delete gv_cpu_;
gv_cpu_ = NULL;
delete grid_cpu_;
grid_cpu_ = NULL;
for(int i = 0; i < num_gpus_; i++)
{
delete grid_cuda_[i];
grid_cuda_[i] = NULL;
delete gv_cuda_[i];
gv_cuda_[i] = NULL;
}
}
void StencilRuntime::StencilFinalize()
{
delete gv_;
delete grid_;
delete grid_cpu_;
delete gv_cpu_;
for(int i = 0; i < num_gpus_; i++)
{
delete grid_cuda_[i];
delete gv_cuda_[i];
}
free(grid_cuda_);
free(gv_cuda_);
free(internal_tiles);
free(border_tiles);
free(requests);
//MPI_Finalize();
}
|
112dac0c74d34172e77885ca1d28a086a4bf82af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_quadrature.h"
__device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z)
{
*x = r * sin(theta) * cos(phi);
*y = r * sin(theta) * sin(phi);
*z = r * cos(theta);
}
__device__ void cart2sphere(real u, real v, real w, real theta, real phi,
real *ur, real *ut, real *up)
{
real st = sin(theta);
real ct = cos(theta);
real sp = sin(phi);
real cp = cos(phi);
*ur = st * (u * cp + v * sp) + w * ct;
*ut = ct * (u * cp + v * sp) - w * st;
*up = -u * sp + v * cp;
}
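// flag, for each particle, the quadrature nodes on its integration sphere that
// fall outside the domain through a DirichLET wall: nodes[node] stays -1 when
// unobstructed, otherwise it is set to a wall code in -10..-15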
__global__ void check_nodes(int nparts, part_struct *parts, dom_struct *dom,
real *theta, real *phi, int nnodes, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
// start off with all -1's
parts[part].nodes[node] = -1;
// check if the node is interfered with by a wall
// compute distance between node and walls
// set equal to some number to identify which wall is interfering
if(x - dom->xs < 0) {
if(bc.uW == DIRICHLET || bc.vW == DIRICHLET || bc.wW == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -10;
} if(x - dom->xe > 0) {
if(bc.uE == DIRICHLET || bc.vE == DIRICHLET || bc.wE == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -11;
} if(y - dom->ys < 0) {
if(bc.uS == DIRICHLET || bc.vS == DIRICHLET || bc.wS == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -12;
} if(y - dom->ye > 0) {
if(bc.uN == DIRICHLET || bc.vN == DIRICHLET || bc.wN == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -13;
} if(z - dom->zs < 0) {
if(bc.uB == DIRICHLET || bc.vB == DIRICHLET || bc.wB == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -14;
} if(z - dom->ze > 0) {
if(bc.uT == DIRICHLET || bc.vT == DIRICHLET || bc.wT == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -15;
}
}
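// interpolate pressure and velocity from the Eulerian grid onto each particle's
// quadrature nodes via a first-order Taylor expansion about the containing cell
// or face center, subtract the particle's rigid-body motion (rest frame), and
// store the result in spherical components (pp, ur, ut, up)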
__global__ void interpolate_nodes(real *p0, real *p, real *u, real *v, real *w,
real rho_f, real nu, gradP_struct gradP,
part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes,
real *pp, real *ur, real *ut, real *up, real dt0, real dt, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// the node number of the intersecting node
int intnode = parts[part].nodes[node];
if(intnode < 0) intnode = part;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
real ox = parts[part].ox;
real oy = parts[part].oy;
real oz = parts[part].oz;
real oxdot = parts[part].oxdot;
real oydot = parts[part].oydot;
real ozdot = parts[part].ozdot;
real udot = parts[part].udot;
real vdot = parts[part].vdot;
real wdot = parts[part].wdot;
real uu, vv, ww; // temporary nodes for Cartesian result of interpolation
real uuwall, vvwall, wwwall;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl;
else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl;
if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl;
else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl;
if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl;
else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl;
__syncthreads();
// find index of cell containing node
int i = floor((x - dom->xs) * ddx) + DOM_BUF;
int j = floor((y - dom->ys) * ddy) + DOM_BUF;
int k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gcc.is) i = dom->Gcc.is;
if(j < dom->Gcc.js) j = dom->Gcc.js;
if(k < dom->Gcc.ks) k = dom->Gcc.ks;
if(i > dom->Gcc.ie-1) i = dom->Gcc.ie-1;
if(j > dom->Gcc.je-1) j = dom->Gcc.je-1;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ke-1;
int C = i + j*dom->Gcc.s1b + k*dom->Gcc.s2b;
// Cartesian location of center of cell
real xx = (i-0.5) * dom->dx + dom->xs;
real yy = (j-0.5) * dom->dy + dom->ys;
real zz = (k-0.5) * dom->dz + dom->zs;
// interpolate pressure
real pc = p[C];
real pw = p[C-1];
real pe = p[C+1];
real ps = p[C-dom->Gcc.s1b];
real pn = p[C+dom->Gcc.s1b];
real pb = p[C-dom->Gcc.s2b];
real pt = p[C+dom->Gcc.s2b];
real dpdx = 0.5*(pe - pw) * ddx;
real dpdy = 0.5*(pn - ps) * ddy;
real dpdz = 0.5*(pt - pb) * ddz;
pp[node+nnodes*part] = pc + dpdx*(x-xx) + dpdy*(y-yy) + dpdz*(z-zz);
// switch to particle rest frame
real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp);
ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp);
ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*xp + (-gradP.y/rhoV - vdot)*yp
+ (-gradP.z/rhoV - wdot)*zp;
pp[node+nnodes*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
// zero if this node intersects wall
pp[node+nnodes*part] = (parts[part].nodes[node]==-1)*pp[node+part*nnodes];
// interpolate velocities
// no longer working with the cell-centered grid;
// find closest cell face in x-direction
// interpolate u-velocity
i = round((x - dom->xs) * ddx) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfx.is) i = dom->Gfx.is;
if(j < dom->Gfx.js) j = dom->Gfx.js;
if(k < dom->Gfx.ks) k = dom->Gfx.ks;
if(i > dom->Gfx.ie-1) i = dom->Gfx.ie-1;
if(j > dom->Gfx.je-1) j = dom->Gfx.je-1;
if(k > dom->Gfx.ke-1) k = dom->Gfx.ke-1;
xx = (i-DOM_BUF) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfx.s1b + k*dom->Gfx.s2b;
real dudx = 0.5*(u[C+1] - u[C-1]) * ddx;
real dudy = 0.5*(u[C+dom->Gfx.s1b] - u[C-dom->Gfx.s1b]) * ddy;
real dudz = 0.5*(u[C+dom->Gfx.s2b] - u[C-dom->Gfx.s2b]) * ddz;
uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz);
// set uuwall equal to interfering wall u-velocity
uuwall = (parts[part].nodes[node] == -10)*bc.uWD
+ (parts[part].nodes[node] == -11)*bc.uED
+ (parts[part].nodes[node] == -12)*bc.uSD
+ (parts[part].nodes[node] == -13)*bc.uND
+ (parts[part].nodes[node] == -14)*bc.uBD
+ (parts[part].nodes[node] == -15)*bc.uTD;
// switch to particle rest frame
real rs3 = parts[part].rs*parts[part].rs*parts[part].rs;
real rs5 = rs3*parts[part].rs*parts[part].rs;
real a5 = parts[part].r*parts[part].r*parts[part].r*parts[part].r*parts[part].r;
real ocrossr_x = oy*zp - oz*yp;
real odotcrossr_x = oydot*zp - ozdot*yp;
uu -= parts[part].u + ocrossr_x;
uu -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
uuwall -= parts[part].u + ocrossr_x;
uuwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
// set actual node value based on whether it is interfered with
uu = (parts[part].nodes[node]==-1)*uu
+ (parts[part].nodes[node]<-1)*uuwall;
//printf("uu = %f uuwall = %f\n", uu + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x, uuwall + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x);
// interpolate v-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = round((y - dom->ys) * ddy) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfy.is) i = dom->Gfy.is;
if(j < dom->Gfy.js) j = dom->Gfy.js;
if(k < dom->Gfy.ks) k = dom->Gfy.ks;
if(i > dom->Gfy.ie-1) i = dom->Gfy.ie-1;
if(j > dom->Gfy.je-1) j = dom->Gfy.je-1;
if(k > dom->Gfy.ke-1) k = dom->Gfy.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-DOM_BUF) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfy.s1b + k*dom->Gfy.s2b;
real dvdx = 0.5*(v[C+1] - v[C-1]) * ddx;
real dvdy = 0.5*(v[C+dom->Gfy.s1b] - v[C-dom->Gfy.s1b]) * ddy;
real dvdz = 0.5*(v[C+dom->Gfy.s2b] - v[C-dom->Gfy.s2b]) * ddz;
vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz);
// set vvwall equal to interfering wall v-velocity
vvwall = (parts[part].nodes[node] == -10)*bc.vWD
+ (parts[part].nodes[node] == -11)*bc.vED
+ (parts[part].nodes[node] == -12)*bc.vSD
+ (parts[part].nodes[node] == -13)*bc.vND
+ (parts[part].nodes[node] == -14)*bc.vBD
+ (parts[part].nodes[node] == -15)*bc.vTD;
// switch to particle rest frame
real ocrossr_y = -(ox*zp - oz*xp);
real odotcrossr_y = -(oxdot*zp - ozdot*xp);
vv -= parts[part].v + ocrossr_y;
vv -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
vvwall -= parts[part].v + ocrossr_y;
vvwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
// set actual node value based on whether it is interfered with
vv = (parts[part].nodes[node]==-1)*vv
+ (parts[part].nodes[node]<-1)*vvwall;
// interpolate w-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = round((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfz.is) i = dom->Gfz.is;
if(j < dom->Gfz.js) j = dom->Gfz.js;
if(k < dom->Gfz.ks) k = dom->Gfz.ks;
if(i > dom->Gfz.ie-1) i = dom->Gfz.ie-1;
if(j > dom->Gfz.je-1) j = dom->Gfz.je-1;
if(k > dom->Gfz.ke-1) k = dom->Gfz.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-DOM_BUF) * dom->dz + dom->zs;
C = i + j*dom->Gfz.s1b + k*dom->Gfz.s2b;
real dwdx = 0.5*(w[C+1] - w[C-1]) * ddx;
real dwdy = 0.5*(w[C+dom->Gfz.s1b] - w[C-dom->Gfz.s1b]) * ddy;
real dwdz = 0.5*(w[C+dom->Gfz.s2b] - w[C-dom->Gfz.s2b]) * ddz;
ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz);
// set uuwall equal to interfering wall u-velocity
wwwall = (parts[part].nodes[node] == -10)*bc.wWD
+ (parts[part].nodes[node] == -11)*bc.wED
+ (parts[part].nodes[node] == -12)*bc.wSD
+ (parts[part].nodes[node] == -13)*bc.wND
+ (parts[part].nodes[node] == -14)*bc.wBD
+ (parts[part].nodes[node] == -15)*bc.wTD;
// switch to particle rest frame
real ocrossr_z = ox*yp - oy*xp;
real odotcrossr_z = oxdot*yp - oydot*xp;
ww -= parts[part].w + ocrossr_z;
ww -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
wwwall -= parts[part].w + ocrossr_z;
wwwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
// set actual node value based on whether it is interfered with
ww = (parts[part].nodes[node]==-1)*ww
+ (parts[part].nodes[node]<-1)*wwwall;
// convert (uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays
cart2sphere(uu, vv, ww, theta[node], phi[node],
&ur[node+part*nnodes], &ut[node+part*nnodes], &up[node+part*nnodes]);
//printf("%e %e u = %e v = %e w = %e\n", theta[node], phi[node], uu,vv,ww);
}
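// normalization factor of the spherical harmonic Y_n^m:
// sqrt((2n+1)/(4*pi) * (n-m)!/(n+m)!)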
__device__ real nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for(int i = 1; i <= (n-m); i++) fact_top *= (real)i;
for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i;
return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot);
}
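// associated Legendre polynomial P_n^m(cos(theta)), hard-coded up to degree n = 5;
// negative orders follow P_n^{-m} = (-1)^m (n-m)!/(n+m)! P_n^m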
__device__ real pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
case -1: return 0.5*y; // double check!!
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
case -2: return 0.125*y*y;
case -1: return 0.5*x*y; // double check!!
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
case -3: return 0.02083333333333*y*y*y; // double check!
case -2: return 0.125*x*y*y;
case -1: return 0.125*(5.*x*x - 1.)*y; // double check!
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
case -4: return .002604166666667*y*y*y*y;
case -3: return 0.02083333333333*x*y*y*y;
case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
case -1: return 0.125*x*(7.*x*x - 1.)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
case -5: return 0.000260416666667*y*y*y*y*y;
case -4: return 0.002604166666667*x*y*y*y*y;
case -3: return 0.002604166666667*y*y*y*(9.*x*x - 1.);
case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
case -1: return 0.0625*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
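// project the node values of pressure and surface velocity onto the spherical
// harmonic basis and update the expansion coefficients p_nm, phi_nm and chi_nm,
// under-relaxed against the previous iterate (pnm_re0, ...) by lambrelax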
__global__ void cuda_get_coeffs(part_struct *parts,
int *nn, int *mm, real *node_t, real *node_p,
real *pp, real *ur, real *ut, real *up, real mu, real nu,
int stride, real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im,
int nnodes, int ncoeffs, real A1, real A2, real A3, real B,
real *pnm_re0, real *pnm_im0,
real *phinm_re0, real *phinm_im0,
real *chinm_re0, real *chinm_im0,
real lambrelax)
{
int node = threadIdx.x;
int part = blockIdx.x;
int coeff = blockIdx.y;
real ars = parts[part].r / parts[part].rs;
real rsa = parts[part].rs / parts[part].r;
int i; // iterator
if(coeff < parts[part].ncoeff) {
// calculate integrand at each node
int j = part*nnodes*ncoeffs + coeff*nnodes + node;
int n = nn[coeff];
int m = mm[coeff];
real theta = node_t[node];
real phi = node_p[node];
real N_nm = nnm(n,m);
real P_nm = pnm(n,m,theta);
real P_n1m = pnm(n+1.,m,theta);
real dPdt = (n-m+1.)*P_n1m-(n+1.)*cos(theta)*P_nm;
real dPdp = m*P_nm;
int_Yp_re[j] = N_nm*P_nm*pp[node+part*nnodes]*cos(m*phi);
int_Yp_im[j] = -N_nm*P_nm*pp[node+part*nnodes]*sin(m*phi);
int_rDYu_re[j] = N_nm/sin(theta)*(dPdt*ut[node+part*nnodes]*cos(m*phi)
- dPdp*up[node+part*nnodes]*sin(m*phi));
int_rDYu_im[j] = N_nm/sin(theta)*(-dPdt*ut[node+part*nnodes]*sin(m*phi)
- dPdp*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_re[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*sin(m*phi)
+ dPdt*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_im[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*cos(m*phi)
- dPdt*up[node+part*nnodes]*sin(m*phi));
//if(n == 1 && m == 1) {
// printf("int_Yp_re(%f, %f) = %e\n", theta, phi, int_Yp_re[j]);
// printf("int_rDYu_re(%f, %f) = %e\n", theta, phi, int_rDYu_re[j]);
// printf("int_xXDYu_re(%f, %f) = %e ut = %e up = %e\n", theta, phi, int_xXDYu_re[j], ut[node+part*nnodes], up[node+part*nnodes]);
//}
__syncthreads();
// compute scalar products
// put sum into first node position for each coeff for each particle
if(node == 0) {
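// 26-node quadrature: nodes 0-5 carry weight A1, nodes 6-17 weight A2 and
// nodes 18-25 weight A3 (the commented-out loop below would add 24 more
// nodes with weight B)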
int_Yp_re[j] *= A1;
int_Yp_im[j] *= A1;
int_rDYu_re[j] *= A1;
int_rDYu_im[j] *= A1;
int_xXDYu_re[j] *= A1;
int_xXDYu_im[j] *= A1;
for(i = 1; i < 6; i++) {
int_Yp_re[j] += A1 * int_Yp_re[j+i];
int_Yp_im[j] += A1 * int_Yp_im[j+i];
int_rDYu_re[j] += A1 * int_rDYu_re[j+i];
int_rDYu_im[j] += A1 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A1 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A1 * int_xXDYu_im[j+i];
}
for(i = 6; i < 18; i++) {
int_Yp_re[j] += A2 * int_Yp_re[j+i];
int_Yp_im[j] += A2 * int_Yp_im[j+i];
int_rDYu_re[j] += A2 * int_rDYu_re[j+i];
int_rDYu_im[j] += A2 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A2 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A2 * int_xXDYu_im[j+i];
}
for(i = 18; i < 26; i++) {
int_Yp_re[j] += A3 * int_Yp_re[j+i];
int_Yp_im[j] += A3 * int_Yp_im[j+i];
int_rDYu_re[j] += A3 * int_rDYu_re[j+i];
int_rDYu_im[j] += A3 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A3 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A3 * int_xXDYu_im[j+i];
}
/*for(i = 26; i < 50; i++) {
int_Yp_re[j] += B * int_Yp_re[j+i];
int_Yp_im[j] += B * int_Yp_im[j+i];
int_rDYu_re[j] += B * int_rDYu_re[j+i];
int_rDYu_im[j] += B * int_rDYu_im[j+i];
int_xXDYu_re[j] += B * int_xXDYu_re[j+i];
int_xXDYu_im[j] += B * int_xXDYu_im[j+i];
}
*/
//if(n == 1 && m == 1) {
// printf("int_Yp_re = %e\n", int_Yp_re[j]);
// printf("int_rDYu_re = %e\n", int_rDYu_re[j]);
// printf("int_xXDYu_re = %e\n", int_xXDYu_re[j]);
//}
#ifdef TEST
real relax = 1.0;
#else
real relax = lambrelax;
#endif
if(n == 0) {
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*pow(ars,n)
- pnm_re0[stride*part+coeff]);
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*pow(ars,n)
- pnm_im0[stride*part+coeff]);
phinm_re[stride*part+coeff] = 0.;
phinm_im[stride*part+coeff] = 0.;
chinm_re[stride*part+coeff] = 0.;
chinm_im[stride*part+coeff] = 0.;
} else {
// calculate p_nm and phi_nm
real A = (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ars,2.*n+1.))*pow(rsa,n);
real B = n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ars,n+1.);
real C = 0.25*n*(2.*(n+3.)/(2.*n+3.)
+ (n-2.-n*(2.*n+1.)/(2.*n+3.)*ars*ars)*pow(ars,2.*n+1.))*pow(rsa,n+1.);
real D = n*(n+1.+0.5*((n-2.)*(2.*n+1.)*rsa*rsa
- n*(2.*n-1.))*pow(ars,2.*n+1.))*pow(rsa,n-1.);
pnm_re[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_re[j]*D + parts[part].r/nu*int_rDYu_re[j]*B) / (A*D+B*C);
pnm_im[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_im[j]*D + parts[part].r/nu*int_rDYu_im[j]*B) / (A*D+B*C);
phinm_re[stride*part+coeff] = (parts[part].r/nu*int_rDYu_re[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*C) / (A*D+B*C);
phinm_im[stride*part+coeff] = (parts[part].r/nu*int_rDYu_im[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*C) / (A*D+B*C);
// calculate chi_nm
real E = n*(n+1.)*(pow(ars,2.*n+1.)-1.)*pow(rsa, n);
chinm_re[stride*part+coeff] = parts[part].r/nu*int_xXDYu_re[j] / E;
chinm_im[stride*part+coeff] = parts[part].r/nu*int_xXDYu_im[j] / E;
// apply underrelaxation
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]*(1.-relax)
+ relax*pnm_re[stride*part+coeff];
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]*(1.-relax)
+ relax*pnm_im[stride*part+coeff];
phinm_re[stride*part+coeff] = phinm_re0[stride*part+coeff]*(1.-relax)
+ relax*phinm_re[stride*part+coeff];
phinm_im[stride*part+coeff] = phinm_im0[stride*part+coeff]*(1.-relax)
+ relax*phinm_im[stride*part+coeff];
chinm_re[stride*part+coeff] = chinm_re0[stride*part+coeff]*(1.-relax)
+ relax*chinm_re[stride*part+coeff];
chinm_im[stride*part+coeff] = chinm_im0[stride*part+coeff]*(1.-relax)
+ relax*chinm_im[stride*part+coeff];
//printf("pnm_re(%d,%d) = %e\n", n,m, pnm_re[stride*part+coeff]);
}
}
}
}
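// assemble the hydrodynamic force and torque on each particle from the n = 1
// coefficients of p_nm, phi_nm and chi_nm together with the imposed pressure
// gradient and particle acceleration terms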
__global__ void cuda_calc_forces(dom_struct *dom, part_struct *parts,
int nparts, gradP_struct gradP,
real rho_f, real mu, real nu, int stride,
real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number
if(pp < nparts) {
real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r;
real N10 = sqrt(3./4./PI);
real N11 = sqrt(3./8./PI);
parts[pp].Fx = rho_f * vol * (parts[pp].udot + gradP.x/rho_f)
- PI * mu * nu * 2.*N11 * (pnm_re[stride*pp + 2]
+ 6.*phinm_re[stride*pp + 2]);
parts[pp].Fy = rho_f * vol * (parts[pp].vdot + gradP.y/rho_f)
+ PI * mu * nu * 2.*N11 * (pnm_im[stride*pp + 2]
+ 6.*phinm_im[stride*pp + 2]);
parts[pp].Fz = rho_f * vol * (parts[pp].wdot + gradP.z/rho_f)
+ PI * mu * nu * N10 * (pnm_re[stride*pp + 1]
+ 6.*phinm_re[stride*pp + 1]);
parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot
- 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_re[stride*pp + 2];
parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot
+ 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_im[stride*pp + 2];
parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot
+ 8. * PI * mu * nu * N10 * parts[pp].r * chinm_re[stride*pp + 1];
}
}
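// per-particle convergence check: sort the six coefficient sets by magnitude and,
// for entries larger than lamb_cut times the largest coefficient, compute the
// relative change from the previous iteration; the largest change is returned in
// part_errors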
__global__ void compute_error(real lamb_cut, int stride, int nparts,
real *pnm_re, real *pnm_re0, real *pnm_im, real *pnm_im0,
real *phinm_re, real *phinm_re0, real *phinm_im, real *phinm_im0,
real *chinm_re, real *chinm_re0, real *chinm_im, real *chinm_im0,
real *coeffs, real *errors, real *part_errors, dom_struct *dom, real nu)
{
int part = blockIdx.x;
int i,j;
real tmp = FLT_MIN;
int loc = 0;
real avg = 0;
real div = 0;
// create shared memory space
__shared__ real s_coeffs[6*21]; // ** have to hard-code this length **
__shared__ real s_coeffs0[6*21]; // ** have to hard-code this length **
// using 6 coefficient sets, each holding
// a maximum of 21 coefficients (5th-order
// truncation)
// copy coeffs for this particle into shared memory
for(i = 0; i < stride; i++) {
s_coeffs[i] = pnm_re[part*stride+i];
s_coeffs[i+1*stride] = pnm_im[part*stride+i];
s_coeffs[i+2*stride] = phinm_re[part*stride+i];
s_coeffs[i+3*stride] = phinm_im[part*stride+i];
s_coeffs[i+4*stride] = chinm_re[part*stride+i];
s_coeffs[i+5*stride] = chinm_im[part*stride+i];
s_coeffs0[i] = pnm_re0[part*stride+i];
s_coeffs0[i+1*stride] = pnm_im0[part*stride+i];
s_coeffs0[i+2*stride] = phinm_re0[part*stride+i];
s_coeffs0[i+3*stride] = phinm_im0[part*stride+i];
s_coeffs0[i+4*stride] = chinm_re0[part*stride+i];
s_coeffs0[i+5*stride] = chinm_im0[part*stride+i];
}
// compute the average of the coefficients
for(i = 0; i < stride*6; i++) {
avg += s_coeffs[i]*s_coeffs[i];
}
avg = avg / (stride*6.);
// sort the coefficients in shared memory and calculate errors along the way
for(i = 0; i < 6*stride; i++) {
// search for the largest magnitude value in shared and store its location
tmp = FLT_MIN;
for(j = 0; j < 6*stride; j++) {
if(s_coeffs[j]*s_coeffs[j] > tmp) {
tmp = s_coeffs[j]*s_coeffs[j];
loc = j;
}
}
// move the largest value into sorted list
coeffs[part*stride+i] = s_coeffs[loc];
// if its corresponding coefficient has large enough magnitude,
// compute error for this coefficient
if(fabs(s_coeffs[loc]) > lamb_cut*fabs(coeffs[part*stride+0])) {
div = fabs(s_coeffs[loc]);// + fabs(avg)*1e-4;
if(div < 1e-16) div = 1e-16;
errors[part*stride+i] = fabs((s_coeffs[loc] - s_coeffs0[loc]) / div);
} else errors[part*stride+i] = 0.;
// discard this value since we've used it once
s_coeffs[loc] = 0.;
}
// find the largest error for each particle
tmp = FLT_MIN;
for(i = 0; i < 6*stride; i++) {
if(errors[part*stride+i] > tmp) tmp = errors[part*stride+i];
}
// write error to return for each particle
part_errors[part] = tmp;
}
| 112dac0c74d34172e77885ca1d28a086a4bf82af.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include "cuda_quadrature.h"
__device__ void rtp2xyz(real r, real theta, real phi, real *x, real *y, real *z)
{
*x = r * sin(theta) * cos(phi);
*y = r * sin(theta) * sin(phi);
*z = r * cos(theta);
}
__device__ void cart2sphere(real u, real v, real w, real theta, real phi,
real *ur, real *ut, real *up)
{
real st = sin(theta);
real ct = cos(theta);
real sp = sin(phi);
real cp = cos(phi);
*ur = st * (u * cp + v * sp) + w * ct;
*ut = ct * (u * cp + v * sp) - w * st;
*up = -u * sp + v * cp;
}
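// flag, for each particle, the quadrature nodes on its integration sphere that
// fall outside the domain through a Dirichlet wall: nodes[node] stays -1 when
// unobstructed, otherwise it is set to a wall code in -10..-15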
__global__ void check_nodes(int nparts, part_struct *parts, dom_struct *dom,
real *theta, real *phi, int nnodes, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
// start off with all -1's
parts[part].nodes[node] = -1;
// check if the node is interfered with by a wall
// compute distance between node and walls
// set equal to some number to identify which wall is interfering
if(x - dom->xs < 0) {
if(bc.uW == DIRICHLET || bc.vW == DIRICHLET || bc.wW == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -10;
} if(x - dom->xe > 0) {
if(bc.uE == DIRICHLET || bc.vE == DIRICHLET || bc.wE == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -11;
} if(y - dom->ys < 0) {
if(bc.uS == DIRICHLET || bc.vS == DIRICHLET || bc.wS == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -12;
} if(y - dom->ye > 0) {
if(bc.uN == DIRICHLET || bc.vN == DIRICHLET || bc.wN == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -13;
} if(z - dom->zs < 0) {
if(bc.uB == DIRICHLET || bc.vB == DIRICHLET || bc.wB == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -14;
} if(z - dom->ze > 0) {
if(bc.uT == DIRICHLET || bc.vT == DIRICHLET || bc.wT == DIRICHLET)
if(parts[part].nodes[node] == -1)
parts[part].nodes[node] = -15;
}
}
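// interpolate pressure and velocity from the Eulerian grid onto each particle's
// quadrature nodes via a first-order Taylor expansion about the containing cell
// or face center, subtract the particle's rigid-body motion (rest frame), and
// store the result in spherical components (pp, ur, ut, up)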
__global__ void interpolate_nodes(real *p0, real *p, real *u, real *v, real *w,
real rho_f, real nu, gradP_struct gradP,
part_struct *parts, dom_struct *dom, real *theta, real *phi, int nnodes,
real *pp, real *ur, real *ut, real *up, real dt0, real dt, BC bc)
{
int node = threadIdx.x;
int part = blockIdx.x;
// the node number of the intersecting node
int intnode = parts[part].nodes[node];
if(intnode < 0) intnode = part;
real ddx = 1. / dom->dx;
real ddy = 1. / dom->dy;
real ddz = 1. / dom->dz;
real ox = parts[part].ox;
real oy = parts[part].oy;
real oz = parts[part].oz;
real oxdot = parts[part].oxdot;
real oydot = parts[part].oydot;
real ozdot = parts[part].ozdot;
real udot = parts[part].udot;
real vdot = parts[part].vdot;
real wdot = parts[part].wdot;
real uu, vv, ww; // temporary nodes for Cartesian result of interpolation
real uuwall, vvwall, wwwall;
// convert node (r, theta, phi) to (x, y, z)
real xp, yp, zp; // Cartesian radial vector
real x, y, z; // Cartesian location of node
rtp2xyz(parts[part].rs, theta[node], phi[node], &xp, &yp, &zp);
// shift from particle center
x = xp + parts[part].x;
y = yp + parts[part].y;
z = zp + parts[part].z;
if(x < dom->xs && bc.uW == PERIODIC) x = x + dom->xl;
else if(x > dom->xe && bc.uE == PERIODIC) x = x - dom->xl;
if(y < dom->ys && bc.vS == PERIODIC) y = y + dom->yl;
else if(y > dom->ye && bc.vN == PERIODIC) y = y - dom->yl;
if(z < dom->zs && bc.wB == PERIODIC) z = z + dom->zl;
else if(z > dom->ze && bc.wT == PERIODIC) z = z - dom->zl;
__syncthreads();
// find index of cell containing node
int i = floor((x - dom->xs) * ddx) + DOM_BUF;
int j = floor((y - dom->ys) * ddy) + DOM_BUF;
int k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gcc.is) i = dom->Gcc.is;
if(j < dom->Gcc.js) j = dom->Gcc.js;
if(k < dom->Gcc.ks) k = dom->Gcc.ks;
if(i > dom->Gcc.ie-1) i = dom->Gcc.ie-1;
if(j > dom->Gcc.je-1) j = dom->Gcc.je-1;
if(k > dom->Gcc.ke-1) k = dom->Gcc.ke-1;
int C = i + j*dom->Gcc.s1b + k*dom->Gcc.s2b;
// Cartesian location of center of cell
real xx = (i-0.5) * dom->dx + dom->xs;
real yy = (j-0.5) * dom->dy + dom->ys;
real zz = (k-0.5) * dom->dz + dom->zs;
// interpolate pressure
real pc = p[C];
real pw = p[C-1];
real pe = p[C+1];
real ps = p[C-dom->Gcc.s1b];
real pn = p[C+dom->Gcc.s1b];
real pb = p[C-dom->Gcc.s2b];
real pt = p[C+dom->Gcc.s2b];
real dpdx = 0.5*(pe - pw) * ddx;
real dpdy = 0.5*(pn - ps) * ddy;
real dpdz = 0.5*(pt - pb) * ddz;
pp[node+nnodes*part] = pc + dpdx*(x-xx) + dpdy*(y-yy) + dpdz*(z-zz);
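// the interpolation above is a first-order Taylor expansion about the cell
// center: p(node) ~= p_C + grad(p)|_C . (x_node - x_C), with the gradient
// approximated by central differences of the neighboring cell-centered values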
// switch to particle rest frame
real ocrossr2 = (oy*zp - oz*yp) * (oy*zp - oz*yp);
ocrossr2 += (ox*zp - oz*xp) * (ox*zp - oz*xp);
ocrossr2 += (ox*yp - oy*xp) * (ox*yp - oy*xp);
real rhoV = rho_f;
real accdotr = (-gradP.x/rhoV - udot)*xp + (-gradP.y/rhoV - vdot)*yp
+ (-gradP.z/rhoV - wdot)*zp;
pp[node+nnodes*part] -= 0.5 * rho_f * ocrossr2 + rho_f * accdotr;
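// the two terms just subtracted remove the rigid-frame contributions to the
// pressure: 0.5*rho_f*|omega x r|^2 is the rotating-frame (centrifugal) part,
// and rho_f*accdotr accounts for the acceleration of the reference frame
// (particle acceleration plus the imposed pressure gradient), leaving the
// disturbance pressure in the particle rest frame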
// zero if this node intersects wall
pp[node+nnodes*part] = (parts[part].nodes[node]==-1)*pp[node+part*nnodes];
// interpolate velocities
// velocities are stored on the face-centered grids, not at cell centers, so
// for each component locate the closest face-centered cell in that direction
// interpolate u-velocity
i = round((x - dom->xs) * ddx) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfx.is) i = dom->Gfx.is;
if(j < dom->Gfx.js) j = dom->Gfx.js;
if(k < dom->Gfx.ks) k = dom->Gfx.ks;
if(i > dom->Gfx.ie-1) i = dom->Gfx.ie-1;
if(j > dom->Gfx.je-1) j = dom->Gfx.je-1;
if(k > dom->Gfx.ke-1) k = dom->Gfx.ke-1;
xx = (i-DOM_BUF) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfx.s1b + k*dom->Gfx.s2b;
real dudx = 0.5*(u[C+1] - u[C-1]) * ddx;
real dudy = 0.5*(u[C+dom->Gfx.s1b] - u[C-dom->Gfx.s1b]) * ddy;
real dudz = 0.5*(u[C+dom->Gfx.s2b] - u[C-dom->Gfx.s2b]) * ddz;
uu = u[C] + dudx * (x - xx) + dudy * (y - yy) + dudz * (z - zz);
// set uuwall equal to interfering wall u-velocity
uuwall = (parts[part].nodes[node] == -10)*bc.uWD
+ (parts[part].nodes[node] == -11)*bc.uED
+ (parts[part].nodes[node] == -12)*bc.uSD
+ (parts[part].nodes[node] == -13)*bc.uND
+ (parts[part].nodes[node] == -14)*bc.uBD
+ (parts[part].nodes[node] == -15)*bc.uTD;
// switch to particle rest frame
real rs3 = parts[part].rs*parts[part].rs*parts[part].rs;
real rs5 = rs3*parts[part].rs*parts[part].rs;
real a5 = parts[part].r*parts[part].r*parts[part].r*parts[part].r*parts[part].r;
real ocrossr_x = oy*zp - oz*yp;
real odotcrossr_x = oydot*zp - ozdot*yp;
uu -= parts[part].u + ocrossr_x;
uu -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
uuwall -= parts[part].u + ocrossr_x;
uuwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_x;
// set actual node value based on whether it is interfered with
uu = (parts[part].nodes[node]==-1)*uu
+ (parts[part].nodes[node]<-1)*uuwall;
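// uu now holds the disturbance u-velocity in the particle rest frame: the
// translational velocity, the rigid rotation (omega x r)_x, and an unsteady
// term proportional to the angular acceleration have been subtracted; nodes
// flagged as wall-interfering use the wall velocity instead of the fluid
// interpolation (the v- and w-components below are treated identically)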
//printf("uu = %f uuwall = %f\n", uu + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x, uuwall + parts[part].u + ocrossr_x + 0.1 / nu / rs3 * (rs5 - r5) * odotcrossr_x);
// interpolate v-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = round((y - dom->ys) * ddy) + DOM_BUF;
k = floor((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfy.is) i = dom->Gfy.is;
if(j < dom->Gfy.js) j = dom->Gfy.js;
if(k < dom->Gfy.ks) k = dom->Gfy.ks;
if(i > dom->Gfy.ie-1) i = dom->Gfy.ie-1;
if(j > dom->Gfy.je-1) j = dom->Gfy.je-1;
if(k > dom->Gfy.ke-1) k = dom->Gfy.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-DOM_BUF) * dom->dy + dom->ys;
zz = (k-0.5) * dom->dz + dom->zs;
C = i + j*dom->Gfy.s1b + k*dom->Gfy.s2b;
real dvdx = 0.5*(v[C+1] - v[C-1]) * ddx;
real dvdy = 0.5*(v[C+dom->Gfy.s1b] - v[C-dom->Gfy.s1b]) * ddy;
real dvdz = 0.5*(v[C+dom->Gfy.s2b] - v[C-dom->Gfy.s2b]) * ddz;
vv = v[C] + dvdx * (x - xx) + dvdy * (y - yy) + dvdz * (z - zz);
// set vvwall equal to interfering wall v-velocity
vvwall = (parts[part].nodes[node] == -10)*bc.vWD
+ (parts[part].nodes[node] == -11)*bc.vED
+ (parts[part].nodes[node] == -12)*bc.vSD
+ (parts[part].nodes[node] == -13)*bc.vND
+ (parts[part].nodes[node] == -14)*bc.vBD
+ (parts[part].nodes[node] == -15)*bc.vTD;
// switch to particle rest frame
real ocrossr_y = -(ox*zp - oz*xp);
real odotcrossr_y = -(oxdot*zp - ozdot*xp);
vv -= parts[part].v + ocrossr_y;
vv -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
vvwall -= parts[part].v + ocrossr_y;
vvwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_y;
// set actual node value based on whether it is interfered with
vv = (parts[part].nodes[node]==-1)*vv
+ (parts[part].nodes[node]<-1)*vvwall;
// interpolate w-velocity
i = floor((x - dom->xs) * ddx) + DOM_BUF;
j = floor((y - dom->ys) * ddy) + DOM_BUF;
k = round((z - dom->zs) * ddz) + DOM_BUF;
if(i < dom->Gfz.is) i = dom->Gfz.is;
if(j < dom->Gfz.js) j = dom->Gfz.js;
if(k < dom->Gfz.ks) k = dom->Gfz.ks;
if(i > dom->Gfz.ie-1) i = dom->Gfz.ie-1;
if(j > dom->Gfz.je-1) j = dom->Gfz.je-1;
if(k > dom->Gfz.ke-1) k = dom->Gfz.ke-1;
xx = (i-0.5) * dom->dx + dom->xs;
yy = (j-0.5) * dom->dy + dom->ys;
zz = (k-DOM_BUF) * dom->dz + dom->zs;
C = i + j*dom->Gfz.s1b + k*dom->Gfz.s2b;
real dwdx = 0.5*(w[C+1] - w[C-1]) * ddx;
real dwdy = 0.5*(w[C+dom->Gfz.s1b] - w[C-dom->Gfz.s1b]) * ddy;
real dwdz = 0.5*(w[C+dom->Gfz.s2b] - w[C-dom->Gfz.s2b]) * ddz;
ww = w[C] + dwdx * (x - xx) + dwdy * (y - yy) + dwdz * (z - zz);
// set wwwall equal to interfering wall w-velocity
wwwall = (parts[part].nodes[node] == -10)*bc.wWD
+ (parts[part].nodes[node] == -11)*bc.wED
+ (parts[part].nodes[node] == -12)*bc.wSD
+ (parts[part].nodes[node] == -13)*bc.wND
+ (parts[part].nodes[node] == -14)*bc.wBD
+ (parts[part].nodes[node] == -15)*bc.wTD;
// switch to particle rest frame
real ocrossr_z = ox*yp - oy*xp;
real odotcrossr_z = oxdot*yp - oydot*xp;
ww -= parts[part].w + ocrossr_z;
ww -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
wwwall -= parts[part].w + ocrossr_z;
wwwall -= 0.1/nu *(rs5-a5)/rs3 * odotcrossr_z;
// set actual node value based on whether it is interfered with
ww = (parts[part].nodes[node]==-1)*ww
+ (parts[part].nodes[node]<-1)*wwwall;
// convert (uu, vv, ww) to (u_r, u_theta, u_phi) and write to node arrays
cart2sphere(uu, vv, ww, theta[node], phi[node],
&ur[node+part*nnodes], &ut[node+part*nnodes], &up[node+part*nnodes]);
//printf("%e %e u = %e v = %e w = %e\n", theta[node], phi[node], uu,vv,ww);
}
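// nnm: spherical-harmonic normalization factor
// N_n^m = sqrt( (2n+1)/(4*pi) * (n-m)!/(n+m)! ),
// with the factorials evaluated by explicit loops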
__device__ real nnm(int n, int m)
{
real fact_top = 1;
real fact_bot = 1;
for(int i = 1; i <= (n-m); i++) fact_top *= (real)i;
for(int i = 1; i <= (n+m); i++) fact_bot *= (real)i;
return sqrt((2.*n+1.) / 4. / PI * fact_top / fact_bot);
}
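// pnm: associated Legendre function P_n^m(cos(theta)), hard-coded up to n = 5.
// Positive orders include the Condon-Shortley phase (-1)^m; negative orders
// follow P_n^{-m} = (-1)^m (n-m)!/(n+m)! P_n^m.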
__device__ real pnm(int n, int m, real theta)
{
real x = cos(theta);
real y = sin(theta);
switch(n) {
case 0: return 1;
case 1:
switch(m) {
case -1: return 0.5*y; // double check!!
case 0: return x;
case 1: return -y;
}
case 2:
switch(m) {
case -2: return 0.125*y*y;
case -1: return 0.5*x*y; // double check!!
case 0: return 0.5*(3.*x*x - 1.);
case 1: return -3.*x*y;
case 2: return 3.*y*y;
}
case 3:
switch(m) {
case -3: return 0.02083333333333*y*y*y; // double check!
case -2: return 0.125*x*y*y;
case -1: return 0.125*(5.*x*x - 1.)*y; // double check!
case 0: return 0.5*x*(5.*x*x - 3.);
case 1: return -1.5*(5.*x*x - 1.)*y;
case 2: return 15.*x*y*y;
case 3: return -15.*y*y*y;
}
case 4:
switch(m) {
case -4: return .002604166666667*y*y*y*y;
case -3: return 0.02083333333333*x*y*y*y;
case -2: return 0.02083333333333*(7.*x*x - 1.)*y*y;
case -1: return 0.125*x*(7.*x*x - 3.)*y;
case 0: return 0.125*(35.*x*x*x*x - 30.*x*x + 3.);
case 1: return -2.5*(7.*x*x - 3.)*x*y;
case 2: return 7.5*(7.*x*x - 1.)*y*y;
case 3: return -105.*x*y*y*y;
case 4: return 105.*y*y*y*y;
}
case 5:
switch(m) {
case -5: return 0.000260416666667*y*y*y*y*y;
case -4: return 0.002604166666667*x*y*y*y*y;
case -3: return 0.002604166666667*y*y*y*(9.*x*x - 1.);
case -2: return 0.0625*x*y*y*(3.*x*x - 1.);
case -1: return 0.0625*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 0: return 0.125*x*(63.*x*x*x*x - 70.*x*x + 15.);
case 1: return -1.875*y*(21.*x*x*x*x - 14.*x*x + 1.);
case 2: return 52.5*x*y*y*(3.*x*x - 1.);
case 3: return -52.5*y*y*y*(9.*x*x - 1.);
case 4: return 945.*x*y*y*y*y;
case 5: return -945.*y*y*y*y*y;
}
}
return 0; // this should never be reached
}
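// cuda_get_coeffs: project the node values of the disturbance pressure and
// velocity onto the spherical harmonics Y_nm to obtain the Lamb-type
// coefficients p_nm, phi_nm and chi_nm, with under-relaxation against the
// previous iterate. Launch layout implied by the indexing: blockIdx.x =
// particle, blockIdx.y = coefficient, threadIdx.x = quadrature node.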
__global__ void cuda_get_coeffs(part_struct *parts,
int *nn, int *mm, real *node_t, real *node_p,
real *pp, real *ur, real *ut, real *up, real mu, real nu,
int stride, real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im,
real *int_Yp_re, real *int_Yp_im,
real *int_rDYu_re, real *int_rDYu_im,
real *int_xXDYu_re, real *int_xXDYu_im,
int nnodes, int ncoeffs, real A1, real A2, real A3, real B,
real *pnm_re0, real *pnm_im0,
real *phinm_re0, real *phinm_im0,
real *chinm_re0, real *chinm_im0,
real lambrelax)
{
int node = threadIdx.x;
int part = blockIdx.x;
int coeff = blockIdx.y;
real ars = parts[part].r / parts[part].rs;
real rsa = parts[part].rs / parts[part].r;
int i; // iterator
if(coeff < parts[part].ncoeff) {
// calculate integrand at each node
int j = part*nnodes*ncoeffs + coeff*nnodes + node;
int n = nn[coeff];
int m = mm[coeff];
real theta = node_t[node];
real phi = node_p[node];
real N_nm = nnm(n,m);
real P_nm = pnm(n,m,theta);
real P_n1m = pnm(n+1, m, theta);
real dPdt = (n-m+1.)*P_n1m-(n+1.)*cos(theta)*P_nm;
real dPdp = m*P_nm;
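// dPdt = (n-m+1)*P_{n+1}^m - (n+1)*cos(theta)*P_n^m equals
// sin(theta)*d/dtheta[P_n^m(cos(theta))] by the standard Legendre recurrence,
// and dPdp = m*P_n^m comes from the phi-derivative of exp(i*m*phi); the
// division by sin(theta) below therefore produces the components of the
// surface gradient of Y_nm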
int_Yp_re[j] = N_nm*P_nm*pp[node+part*nnodes]*cos(m*phi);
int_Yp_im[j] = -N_nm*P_nm*pp[node+part*nnodes]*sin(m*phi);
int_rDYu_re[j] = N_nm/sin(theta)*(dPdt*ut[node+part*nnodes]*cos(m*phi)
- dPdp*up[node+part*nnodes]*sin(m*phi));
int_rDYu_im[j] = N_nm/sin(theta)*(-dPdt*ut[node+part*nnodes]*sin(m*phi)
- dPdp*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_re[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*sin(m*phi)
+ dPdt*up[node+part*nnodes]*cos(m*phi));
int_xXDYu_im[j] = N_nm/sin(theta)*(dPdp*ut[node+part*nnodes]*cos(m*phi)
- dPdt*up[node+part*nnodes]*sin(m*phi));
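// the integrands correspond (up to the normalization N_nm) to the projections
// of the pressure onto Y_nm* and of the tangential velocity onto
// r*grad_S(Y_nm) and r x grad_S(Y_nm), the inner products used to extract
// Lamb's coefficients; the real/imaginary split is carried by the cos(m*phi)
// and sin(m*phi) factors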
//if(n == 1 && m == 1) {
// printf("int_Yp_re(%f, %f) = %e\n", theta, phi, int_Yp_re[j]);
// printf("int_rDYu_re(%f, %f) = %e\n", theta, phi, int_rDYu_re[j]);
// printf("int_xXDYu_re(%f, %f) = %e ut = %e up = %e\n", theta, phi, int_xXDYu_re[j], ut[node+part*nnodes], up[node+part*nnodes]);
//}
__syncthreads();
// compute scalar products
// put sum into first node position for each coeff for each particle
if(node == 0) {
int_Yp_re[j] *= A1;
int_Yp_im[j] *= A1;
int_rDYu_re[j] *= A1;
int_rDYu_im[j] *= A1;
int_xXDYu_re[j] *= A1;
int_xXDYu_im[j] *= A1;
for(i = 1; i < 6; i++) {
int_Yp_re[j] += A1 * int_Yp_re[j+i];
int_Yp_im[j] += A1 * int_Yp_im[j+i];
int_rDYu_re[j] += A1 * int_rDYu_re[j+i];
int_rDYu_im[j] += A1 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A1 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A1 * int_xXDYu_im[j+i];
}
for(i = 6; i < 18; i++) {
int_Yp_re[j] += A2 * int_Yp_re[j+i];
int_Yp_im[j] += A2 * int_Yp_im[j+i];
int_rDYu_re[j] += A2 * int_rDYu_re[j+i];
int_rDYu_im[j] += A2 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A2 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A2 * int_xXDYu_im[j+i];
}
for(i = 18; i < 26; i++) {
int_Yp_re[j] += A3 * int_Yp_re[j+i];
int_Yp_im[j] += A3 * int_Yp_im[j+i];
int_rDYu_re[j] += A3 * int_rDYu_re[j+i];
int_rDYu_im[j] += A3 * int_rDYu_im[j+i];
int_xXDYu_re[j] += A3 * int_xXDYu_re[j+i];
int_xXDYu_im[j] += A3 * int_xXDYu_im[j+i];
}
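// the weighted sum over all 26 nodes (6 with weight A1, 12 with A2, 8 with A3)
// is a surface quadrature rule on the sphere of radius rs; the commented-out
// block below would extend it to a 50-node rule with weight B. Only thread 0
// accumulates, storing the integral in the first node slot of this
// (particle, coefficient) pair.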
/*for(i = 26; i < 50; i++) {
int_Yp_re[j] += B * int_Yp_re[j+i];
int_Yp_im[j] += B * int_Yp_im[j+i];
int_rDYu_re[j] += B * int_rDYu_re[j+i];
int_rDYu_im[j] += B * int_rDYu_im[j+i];
int_xXDYu_re[j] += B * int_xXDYu_re[j+i];
int_xXDYu_im[j] += B * int_xXDYu_im[j+i];
}
*/
//if(n == 1 && m == 1) {
// printf("int_Yp_re = %e\n", int_Yp_re[j]);
// printf("int_rDYu_re = %e\n", int_rDYu_re[j]);
// printf("int_xXDYu_re = %e\n", int_xXDYu_re[j]);
//}
#ifdef TEST
real relax = 1.0;
#else
real relax = lambrelax;
#endif
if(n == 0) {
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*pow(ars,n)
- pnm_re0[stride*part+coeff]);
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]
+ relax*(parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*pow(ars,n)
- pnm_im0[stride*part+coeff]);
phinm_re[stride*part+coeff] = 0.;
phinm_im[stride*part+coeff] = 0.;
chinm_re[stride*part+coeff] = 0.;
chinm_im[stride*part+coeff] = 0.;
} else {
// calculate p_nm and phi_nm
real A = (1.-0.5*n*(2.*n-1.)/(n+1.)*pow(ars,2.*n+1.))*pow(rsa,n);
real B = n*(2.*n-1.)*(2.*n+1.)/(n+1.)*pow(ars,n+1.);
real C = 0.25*n*(2.*(n+3.)/(2.*n+3.)
+ (n-2.-n*(2.*n+1.)/(2.*n+3.)*ars*ars)*pow(ars,2.*n+1.))*pow(rsa,n+1.);
real D = n*(n+1.+0.5*((n-2.)*(2.*n+1.)*rsa*rsa
- n*(2.*n-1.))*pow(ars,2.*n+1.))*pow(rsa,n-1.);
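// p_nm and phi_nm follow from the 2x2 linear system
//   A*p_nm - B*phi_nm = (r^2/(mu*nu)) * int_Yp
//   C*p_nm + D*phi_nm = (r/nu)        * int_rDYu
// solved by Cramer's rule, hence the common determinant (A*D + B*C) below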
pnm_re[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_re[j]*D + parts[part].r/nu*int_rDYu_re[j]*B) / (A*D+B*C);
pnm_im[stride*part+coeff] = (parts[part].r*parts[part].r/mu/nu
*int_Yp_im[j]*D + parts[part].r/nu*int_rDYu_im[j]*B) / (A*D+B*C);
phinm_re[stride*part+coeff] = (parts[part].r/nu*int_rDYu_re[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_re[j]*C) / (A*D+B*C);
phinm_im[stride*part+coeff] = (parts[part].r/nu*int_rDYu_im[j]*A
- parts[part].r*parts[part].r/mu/nu*int_Yp_im[j]*C) / (A*D+B*C);
// calculate chi_nm
real E = n*(n+1.)*(pow(ars,2.*n+1.)-1.)*pow(rsa, n);
chinm_re[stride*part+coeff] = parts[part].r/nu*int_xXDYu_re[j] / E;
chinm_im[stride*part+coeff] = parts[part].r/nu*int_xXDYu_im[j] / E;
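// chi_nm decouples from p_nm and phi_nm and follows directly from the
// rotational surface integral divided by E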
// apply underrelaxation
pnm_re[stride*part+coeff] = pnm_re0[stride*part+coeff]*(1.-relax)
+ relax*pnm_re[stride*part+coeff];
pnm_im[stride*part+coeff] = pnm_im0[stride*part+coeff]*(1.-relax)
+ relax*pnm_im[stride*part+coeff];
phinm_re[stride*part+coeff] = phinm_re0[stride*part+coeff]*(1.-relax)
+ relax*phinm_re[stride*part+coeff];
phinm_im[stride*part+coeff] = phinm_im0[stride*part+coeff]*(1.-relax)
+ relax*phinm_im[stride*part+coeff];
chinm_re[stride*part+coeff] = chinm_re0[stride*part+coeff]*(1.-relax)
+ relax*chinm_re[stride*part+coeff];
chinm_im[stride*part+coeff] = chinm_im0[stride*part+coeff]*(1.-relax)
+ relax*chinm_im[stride*part+coeff];
//printf("pnm_re(%d,%d) = %e\n", n,m, pnm_re[stride*part+coeff]);
}
}
}
}
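// cuda_calc_forces: hydrodynamic force and torque on each particle (one thread
// per particle) from the n = 1 Lamb coefficients; given the ordering implied
// by the N10/N11 prefactors, offsets 1 and 2 in each particle's coefficient
// block hold the (n,m) = (1,0) and (1,1) modes. The rho_f*vol*(...) terms add
// the contribution of the particle acceleration and the imposed pressure
// gradient.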
__global__ void cuda_calc_forces(dom_struct *dom, part_struct *parts,
int nparts, gradP_struct gradP,
real rho_f, real mu, real nu, int stride,
real *pnm_re, real *pnm_im,
real *phinm_re, real *phinm_im,
real *chinm_re, real *chinm_im)
{
int pp = threadIdx.x + blockIdx.x*blockDim.x; // particle number
if(pp < nparts) {
real vol = 4./3. * PI * parts[pp].r*parts[pp].r*parts[pp].r;
real N10 = sqrt(3./4./PI);
real N11 = sqrt(3./8./PI);
parts[pp].Fx = rho_f * vol * (parts[pp].udot + gradP.x/rho_f)
- PI * mu * nu * 2.*N11 * (pnm_re[stride*pp + 2]
+ 6.*phinm_re[stride*pp + 2]);
parts[pp].Fy = rho_f * vol * (parts[pp].vdot + gradP.y/rho_f)
+ PI * mu * nu * 2.*N11 * (pnm_im[stride*pp + 2]
+ 6.*phinm_im[stride*pp + 2]);
parts[pp].Fz = rho_f * vol * (parts[pp].wdot + gradP.z/rho_f)
+ PI * mu * nu * N10 * (pnm_re[stride*pp + 1]
+ 6.*phinm_re[stride*pp + 1]);
parts[pp].Lx = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oxdot
- 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_re[stride*pp + 2];
parts[pp].Ly = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].oydot
+ 8. * PI * mu * nu * 2.*N11 * parts[pp].r * chinm_im[stride*pp + 2];
parts[pp].Lz = rho_f * vol * parts[pp].r*parts[pp].r * parts[pp].ozdot
+ 8. * PI * mu * nu * N10 * parts[pp].r * chinm_re[stride*pp + 1];
}
}
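// compute_error: per-particle convergence check for the Lamb-coefficient
// iteration (one block per particle). The six coefficient sets are copied to
// shared memory, selection-sorted by magnitude, and for every coefficient
// larger than lamb_cut times the largest one the relative change with respect
// to the previous iterate is computed; the maximum of these relative changes
// is returned in part_errors.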
__global__ void compute_error(real lamb_cut, int stride, int nparts,
real *pnm_re, real *pnm_re0, real *pnm_im, real *pnm_im0,
real *phinm_re, real *phinm_re0, real *phinm_im, real *phinm_im0,
real *chinm_re, real *chinm_re0, real *chinm_im, real *chinm_im0,
real *coeffs, real *errors, real *part_errors, dom_struct *dom, real nu)
{
int part = blockIdx.x;
int i,j;
real tmp = FLT_MIN;
int loc = 0;
real avg = 0;
real div = 0;
// create shared memory space
__shared__ real s_coeffs[6*21]; // ** have to hard-code this length **
__shared__ real s_coeffs0[6*21]; // ** have to hard-code this length **
// using 6 coefficient sets, each holding
// a maximum of 21 coefficients (5th-order
// truncation)
// copy coeffs for this particle into shared memory
for(i = 0; i < stride; i++) {
s_coeffs[i] = pnm_re[part*stride+i];
s_coeffs[i+1*stride] = pnm_im[part*stride+i];
s_coeffs[i+2*stride] = phinm_re[part*stride+i];
s_coeffs[i+3*stride] = phinm_im[part*stride+i];
s_coeffs[i+4*stride] = chinm_re[part*stride+i];
s_coeffs[i+5*stride] = chinm_im[part*stride+i];
s_coeffs0[i] = pnm_re0[part*stride+i];
s_coeffs0[i+1*stride] = pnm_im0[part*stride+i];
s_coeffs0[i+2*stride] = phinm_re0[part*stride+i];
s_coeffs0[i+3*stride] = phinm_im0[part*stride+i];
s_coeffs0[i+4*stride] = chinm_re0[part*stride+i];
s_coeffs0[i+5*stride] = chinm_im0[part*stride+i];
}
// compute the mean-square magnitude of the coefficients (avg is referenced
// only by the commented-out term in div below)
for(i = 0; i < stride*6; i++) {
avg += s_coeffs[i]*s_coeffs[i];
}
avg = avg / (stride*6.);
// sort the coefficients in shared memory and calculate errors along the way
for(i = 0; i < 6*stride; i++) {
// search for the largest magnitude value in shared and store its location
tmp = FLT_MIN;
for(j = 0; j < 6*stride; j++) {
if(s_coeffs[j]*s_coeffs[j] > tmp) {
tmp = s_coeffs[j]*s_coeffs[j];
loc = j;
}
}
// move the largest value into sorted list
coeffs[part*stride+i] = s_coeffs[loc];
// if this coefficient is large enough relative to the largest-magnitude
// coefficient (stored at index 0 after sorting), compute its relative change
if(fabs(s_coeffs[loc]) > lamb_cut*fabs(coeffs[part*stride+0])) {
div = fabs(s_coeffs[loc]);// + fabs(avg)*1e-4;
if(div < 1e-16) div = 1e-16;
errors[part*stride+i] = fabs((s_coeffs[loc] - s_coeffs0[loc]) / div);
} else errors[part*stride+i] = 0.;
// discard this value since we've used it once
s_coeffs[loc] = 0.;
}
// find the largest error for each particle
tmp = FLT_MIN;
for(i = 0; i < 6*stride; i++) {
if(errors[part*stride+i] > tmp) tmp = errors[part*stride+i];
}
// write error to return for each particle
part_errors[part] = tmp;
}
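/* Illustrative host-side launch sketch (not part of the original source).
 * It assumes nnodes fits in one thread block and the grid layout implied by
 * the kernels above (blockIdx.x = particle, threadIdx.x = node, and
 * blockIdx.y = coefficient for cuda_get_coeffs); the variable names are
 * placeholders for the corresponding device arrays and scalars:
 *
 *   dim3 blocks_interp(nparts);
 *   dim3 threads_interp(nnodes);
 *   interpolate_nodes<<<blocks_interp, threads_interp>>>(p0, p, u, v, w,
 *     rho_f, nu, gradP, parts, dom, theta, phi, nnodes,
 *     pp, ur, ut, up, dt0, dt, bc);
 *
 *   dim3 blocks_coeff(nparts, ncoeffs);
 *   dim3 threads_coeff(nnodes);
 *   cuda_get_coeffs<<<blocks_coeff, threads_coeff>>>(parts, nn, mm,
 *     node_t, node_p, pp, ur, ut, up, mu, nu, stride,
 *     pnm_re, pnm_im, phinm_re, phinm_im, chinm_re, chinm_im,
 *     int_Yp_re, int_Yp_im, int_rDYu_re, int_rDYu_im,
 *     int_xXDYu_re, int_xXDYu_im, nnodes, ncoeffs, A1, A2, A3, B,
 *     pnm_re0, pnm_im0, phinm_re0, phinm_im0, chinm_re0, chinm_im0,
 *     lambrelax);
 */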