hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
9f4e767875c16171c713b8fb2ab65ae47784eae0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
//CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Other includes
#include "Shader.h"
// Function prototypes
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode);
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Window dimensions
const GLuint WIDTH = 800, HEIGHT = 600;
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
////////////////////////////////////////////////
//
// CUDA part
//
/////////////////////////////////////////////////
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
////////////////////////////////////////////////
//
// GL part
//
/////////////////////////////////////////////////
// Init GLFW
glfwInit();
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, "LearnOpenGL", nullptr, nullptr);
glfwMakeContextCurrent(window);
// Set the required callback functions
glfwSetKeyCallback(window, key_callback);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
glewInit();
// Define the viewport dimensions
glViewport(0, 0, WIDTH, HEIGHT);
// Build and compile our shader program
Shader ourShader("vs.txt", "fs.txt");
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat vertices[] = {
// Positions // Colors
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, // Bottom Left
0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 1.0f // Top
};
GLuint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
// Bind the Vertex Array Object first, then bind and set vertex buffer(s) and attribute pointer(s).
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glBindVertexArray(0); // Unbind VAO
// Game loop
while (!glfwWindowShouldClose(window))
{
// Check if any events have been activated (key pressed, mouse moved etc.) and call corresponding response functions
glfwPollEvents();
// Render
// Clear the colorbuffer
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw the triangle
ourShader.Use();
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers(window);
}
// Properly de-allocate all resources once they've outlived their purpose
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
// Is called whenever a key is pressed/released via GLFW
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
} | 9f4e767875c16171c713b8fb2ab65ae47784eae0.cu | #include <stdio.h>
#include <iostream>
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
//CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Other includes
#include "Shader.h"
// Function prototypes
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode);
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Window dimensions
const GLuint WIDTH = 800, HEIGHT = 600;
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
////////////////////////////////////////////////
//
// CUDA part
//
/////////////////////////////////////////////////
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
////////////////////////////////////////////////
//
// GL part
//
/////////////////////////////////////////////////
// Init GLFW
glfwInit();
// Set all the required options for GLFW
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, "LearnOpenGL", nullptr, nullptr);
glfwMakeContextCurrent(window);
// Set the required callback functions
glfwSetKeyCallback(window, key_callback);
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
glewInit();
// Define the viewport dimensions
glViewport(0, 0, WIDTH, HEIGHT);
// Build and compile our shader program
Shader ourShader("vs.txt", "fs.txt");
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat vertices[] = {
// Positions // Colors
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, // Bottom Left
0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 1.0f // Top
};
GLuint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
// Bind the Vertex Array Object first, then bind and set vertex buffer(s) and attribute pointer(s).
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glBindVertexArray(0); // Unbind VAO
// Game loop
while (!glfwWindowShouldClose(window))
{
// Check if any events have been activated (key pressed, mouse moved etc.) and call corresponding response functions
glfwPollEvents();
// Render
// Clear the colorbuffer
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Draw the triangle
ourShader.Use();
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers(window);
}
// Properly de-allocate all resources once they've outlived their purpose
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
// Is called whenever a key is pressed/released via GLFW
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
} |
5015999d6e9c926e510a9da262b39d0b0266df65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelAdd(float *dvalues, int numOperations, int firstInd, int nextColInd)
{
int vi = firstInd + blockIdx.x * blockDim.x + threadIdx.x;
// "numOperations" is the 2nd input parameter to our executable
if (vi < nextColInd) {
for (int j=0; j<numOperations; ++j) {
// The operation performed on each nonzero of our sparse matrix:
dvalues[vi] *=dvalues[vi]+dvalues[vi]*dvalues[vi]; // POINT 3: Choices you may try here:
} // *= (for multiply), /= (for division),
} // or you may investigate some other :-)
} | 5015999d6e9c926e510a9da262b39d0b0266df65.cu | #include "includes.h"
__global__ void kernelAdd(float *dvalues, int numOperations, int firstInd, int nextColInd)
{
int vi = firstInd + blockIdx.x * blockDim.x + threadIdx.x;
// "numOperations" is the 2nd input parameter to our executable
if (vi < nextColInd) {
for (int j=0; j<numOperations; ++j) {
// The operation performed on each nonzero of our sparse matrix:
dvalues[vi] *=dvalues[vi]+dvalues[vi]*dvalues[vi]; // POINT 3: Choices you may try here:
} // *= (for multiply), /= (for division),
} // or you may investigate some other :-)
} |
8267901db189a23a802f8c10f6d2d006ed119218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@author Stan Tomov
@generated from zgemv_conjv.cu normal z -> d, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define num_threads 256
__global__ void
dgemv_conjv_kernel(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
double res = MAGMA_D_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_D_CNJG(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/**
Purpose
-------
DGEMV_CONJV performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE_PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
A DOUBLE_PRECISION array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
x DOUBLE_PRECISION array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
y DOUBLE PRECISION array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_dblas2
********************************************************************/
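/*
    Usage sketch (hypothetical, not part of the original MAGMA source): assuming
    device arrays dA, dx and dy have already been allocated and filled elsewhere,
    and using the argument order documented above, a call might look like

        magmablas_dgemv_conjv( m, n, alpha, dA, ldda, dx, 1, beta, dy, 1 );

    which performs y := alpha*A*conj(x) + beta*y on the GPU.
*/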
extern "C" void
magmablas_dgemv_conjv(
magma_int_t m, magma_int_t n, double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy)
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( lda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
hipLaunchKernelGGL(( dgemv_conjv_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
m, n, alpha, A, lda, x, incx, beta, y, incy);
}
#undef num_threads
| 8267901db189a23a802f8c10f6d2d006ed119218.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@author Stan Tomov
@generated from zgemv_conjv.cu normal z -> d, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
#define num_threads 256
__global__ void
dgemv_conjv_kernel(
int m, int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx, double beta,
double * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
double res = MAGMA_D_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_D_CNJG(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/**
Purpose
-------
DGEMV_CONJV performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE_PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
A DOUBLE_PRECISION array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
x DOUBLE_PRECISION array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
y DOUBLE PRECISION array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@ingroup magma_dblas2
********************************************************************/
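/*
    Usage sketch (hypothetical, not part of the original MAGMA source): assuming
    device arrays dA, dx and dy have already been allocated and filled elsewhere,
    and using the argument order documented above, a call might look like

        magmablas_dgemv_conjv( m, n, alpha, dA, ldda, dx, 1, beta, dy, 1 );

    which performs y := alpha*A*conj(x) + beta*y on the GPU.
*/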
extern "C" void
magmablas_dgemv_conjv(
magma_int_t m, magma_int_t n, double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy)
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( lda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = (m - 1)/num_threads + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
dgemv_conjv_kernel<<< grid, threads, 0, magma_stream >>>
(m, n, alpha, A, lda, x, incx, beta, y, incy);
}
#undef num_threads
|
1e98f030e405a04d1b2cfe225467795a7fb46da8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHReduceApplyUtils.cuh"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
#include <THH/THHApply.cuh>
template <typename Dtype>
__global__ void VolumetricReplicationPadding_updateOutput(
THCDeviceTensor<Dtype, 5> input,
THCDeviceTensor<Dtype, 5> output,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (output.getSize(2) * output.getSize(3) *
output.getSize(4))) {
return;
}
int outputPointX = outputPointId % output.getSize(4);
int outputPointY = (outputPointId / output.getSize(4)) % output.getSize(3);
int outputPointZ = outputPointId / (output.getSize(3) * output.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
input.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
input.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
input.getSize(2) + pfront - 1) - oStartZ + iStartZ;
Dtype valueToCopy =
input[batch][plane][inputPointZ][inputPointY][inputPointX];
output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename Dtype>
__global__ void VolumetricReplicationPadding_updateGradInput(
THCDeviceTensor<Dtype, 5> gradInput,
THCDeviceTensor<Dtype, 5> gradOutput,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (gradOutput.getSize(2) * gradOutput.getSize(3) *
gradOutput.getSize(4))) {
return;
}
int outputPointX = outputPointId % gradOutput.getSize(4);
int outputPointY = (outputPointId / gradOutput.getSize(4)) %
gradOutput.getSize(3);
int outputPointZ = outputPointId / (gradOutput.getSize(3) *
gradOutput.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
gradInput.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
gradInput.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
gradInput.getSize(2) + pfront - 1) - oStartZ + iStartZ;
Dtype valueToCopy =
gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
#include "generic/VolumetricReplicationPadding.cu"
#include "THHGenerateFloatTypes.h"
| 1e98f030e405a04d1b2cfe225467795a7fb46da8.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCReduceApplyUtils.cuh"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
#include <THC/THCApply.cuh>
template <typename Dtype>
__global__ void VolumetricReplicationPadding_updateOutput(
THCDeviceTensor<Dtype, 5> input,
THCDeviceTensor<Dtype, 5> output,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (output.getSize(2) * output.getSize(3) *
output.getSize(4))) {
return;
}
int outputPointX = outputPointId % output.getSize(4);
int outputPointY = (outputPointId / output.getSize(4)) % output.getSize(3);
int outputPointZ = outputPointId / (output.getSize(3) * output.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
input.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
input.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
input.getSize(2) + pfront - 1) - oStartZ + iStartZ;
Dtype valueToCopy =
input[batch][plane][inputPointZ][inputPointY][inputPointX];
output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename Dtype>
__global__ void VolumetricReplicationPadding_updateGradInput(
THCDeviceTensor<Dtype, 5> gradInput,
THCDeviceTensor<Dtype, 5> gradOutput,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (gradOutput.getSize(2) * gradOutput.getSize(3) *
gradOutput.getSize(4))) {
return;
}
int outputPointX = outputPointId % gradOutput.getSize(4);
int outputPointY = (outputPointId / gradOutput.getSize(4)) %
gradOutput.getSize(3);
int outputPointZ = outputPointId / (gradOutput.getSize(3) *
gradOutput.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
gradInput.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
gradInput.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
gradInput.getSize(2) + pfront - 1) - oStartZ + iStartZ;
Dtype valueToCopy =
gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
#include "generic/VolumetricReplicationPadding.cu"
#include "THCGenerateFloatTypes.h"
|
26ecf0a2cc86beeeda124e806d044fb84897e8fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zbcsrcpy.cu normal z -> s, Fri Jul 18 17:34:27 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// every multiprocessor handles one BCSR-block to copy from A
__global__ void
sbcsrvalcpy_kernel(
int size_b,
magma_int_t num_blocks,
float **Aval,
float **Bval ){
if(blockIdx.x*65535+blockIdx.y < num_blocks){
float *dA = Aval[ blockIdx.x*65535+blockIdx.y ];
float *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
while( i<size_b*size_b ){
dB[i] = dA[i];
i+=BLOCK_SIZE;
}
}
}
// every multiprocessor handles one BCSR-block to initialize with 0
__global__ void
sbcsrvalzro_kernel(
int size_b,
magma_int_t num_blocks,
float **Bval ){
if(blockIdx.x*65535+blockIdx.y < num_blocks){
float *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
//dB += i;
while( i<size_b*size_b ){
dB[i] = MAGMA_S_MAKE(0.0, 0.0);
i+=BLOCK_SIZE;
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine copies the filled blocks
from the original matrix A and initializes the blocks that will later be
filled in the factorization process with zeros.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
num_blocks magma_int_t
number of nonzero blocks
@param
num_zblocks magma_int_t
number of zero-blocks (will later be filled)
@param
Aval float**
pointers to the nonzero blocks in A
@param
Bval float**
pointers to the nonzero blocks in B
@param
Bval2 float**
pointers to the zero blocks in B
@ingroup magmasparse_sgegpuk
********************************************************************/
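/*
    Usage sketch (hypothetical, not part of the original MAGMA source): assuming
    Aval, Bval and Bval2 are device-resident arrays of block pointers prepared
    by the surrounding BCSR ILU setup code, the routine would be called as

        magma_sbcsrvalcpy( size_b, num_blocks, num_zblocks, Aval, Bval, Bval2 );

    copying the filled size_b x size_b blocks of A into B and zeroing the
    blocks of B that the factorization will fill in later.
*/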
extern "C" magma_int_t
magma_sbcsrvalcpy( magma_int_t size_b,
magma_int_t num_blocks,
magma_int_t num_zblocks,
float **Aval,
float **Bval,
float **Bval2 ){
dim3 dimBlock( BLOCK_SIZE, 1, 1 );
// the grids are adapted to the number of nonzero/zero blocks
// the upper block-number the kernels can handle is 65535*65535
int dimgrid1 = 65535;
int dimgrid2 = (num_blocks+65535-1)/65535;
int dimgrid3 = (num_zblocks+65535-1)/65535;
dim3 dimGrid( dimgrid2, dimgrid1, 1 );
hipLaunchKernelGGL(( sbcsrvalcpy_kernel), dim3(dimGrid),dim3(dimBlock), 0, magma_stream ,
size_b, num_blocks, Aval, Bval );
dim3 dimGrid2( dimgrid3, dimgrid1, 1 );
hipLaunchKernelGGL(( sbcsrvalzro_kernel), dim3(dimGrid2),dim3(dimBlock), 0, magma_stream ,
size_b, num_zblocks, Bval2 );
return MAGMA_SUCCESS;
}
| 26ecf0a2cc86beeeda124e806d044fb84897e8fd.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zbcsrcpy.cu normal z -> s, Fri Jul 18 17:34:27 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// every multiprocessor handles one BCSR-block to copy from A
__global__ void
sbcsrvalcpy_kernel(
int size_b,
magma_int_t num_blocks,
float **Aval,
float **Bval ){
if(blockIdx.x*65535+blockIdx.y < num_blocks){
float *dA = Aval[ blockIdx.x*65535+blockIdx.y ];
float *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
while( i<size_b*size_b ){
dB[i] = dA[i];
i+=BLOCK_SIZE;
}
}
}
// every multiprocessor handles one BCSR-block to initialize with 0
__global__ void
sbcsrvalzro_kernel(
int size_b,
magma_int_t num_blocks,
float **Bval ){
if(blockIdx.x*65535+blockIdx.y < num_blocks){
float *dB = Bval[ blockIdx.x*65535+blockIdx.y ];
int i = threadIdx.x;
//dB += i;
while( i<size_b*size_b ){
dB[i] = MAGMA_S_MAKE(0.0, 0.0);
i+=BLOCK_SIZE;
}
}
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine copies the filled blocks
from the original matrix A and initializes the blocks that will later be
filled in the factorization process with zeros.
Arguments
---------
@param
size_b magma_int_t
blocksize in BCSR
@param
num_blocks magma_int_t
number of nonzero blocks
@param
num_zblocks magma_int_t
number of zero-blocks (will later be filled)
@param
Aval float**
pointers to the nonzero blocks in A
@param
Bval float**
pointers to the nonzero blocks in B
@param
Bval2 float**
pointers to the zero blocks in B
@ingroup magmasparse_sgegpuk
********************************************************************/
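/*
    Usage sketch (hypothetical, not part of the original MAGMA source): assuming
    Aval, Bval and Bval2 are device-resident arrays of block pointers prepared
    by the surrounding BCSR ILU setup code, the routine would be called as

        magma_sbcsrvalcpy( size_b, num_blocks, num_zblocks, Aval, Bval, Bval2 );

    copying the filled size_b x size_b blocks of A into B and zeroing the
    blocks of B that the factorization will fill in later.
*/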
extern "C" magma_int_t
magma_sbcsrvalcpy( magma_int_t size_b,
magma_int_t num_blocks,
magma_int_t num_zblocks,
float **Aval,
float **Bval,
float **Bval2 ){
dim3 dimBlock( BLOCK_SIZE, 1, 1 );
// the grids are adapted to the number of nonzero/zero blocks
// the upper block-number the kernels can handle is 65535*65535
int dimgrid1 = 65535;
int dimgrid2 = (num_blocks+65535-1)/65535;
int dimgrid3 = (num_zblocks+65535-1)/65535;
dim3 dimGrid( dimgrid2, dimgrid1, 1 );
sbcsrvalcpy_kernel<<<dimGrid,dimBlock, 0, magma_stream >>>
( size_b, num_blocks, Aval, Bval );
dim3 dimGrid2( dimgrid3, dimgrid1, 1 );
sbcsrvalzro_kernel<<<dimGrid2,dimBlock, 0, magma_stream >>>
( size_b, num_zblocks, Bval2 );
return MAGMA_SUCCESS;
}
|
d6fa59016e4bfe275ed1430644ac5208bef4e079.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc -ptx EM6.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
__device__ void EM1( double *r,
double *z,
double * a,
double * b,
int * parDelete,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
double r0 = r[n];
double z0 = z[n];
int ar,az,a1,a2,a3,a4;
double b1,b2,b3,b4;
ar = floor(r0/dr-0.5);
az = floor(z0/dz-0.5);
if (ar<0){
if (az<0){
a1 = 1;
a2 = 1;
a3 = 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = 0;
b2 = 0;
b3 = 0;
b4 = 1;
}
else if (az>=gridZ-1){
a1 = 1;
a2 = (ar+1) + az * gridR + 1;
a3 = 1;
a4 = 1;
b1 = 0;
b2 = 1;
b3 = 0;
b4 = 0;
}
else{
a1 = 1;
a2 = (ar+1) + az * gridR + 1;
a3 = 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = 0;
b2 = ((az+1.5)*dz-z0)/(dz);
b3 = 0;
b4 = (z0-(az+0.5)*dz)/(dz);
}
}
else if (ar>=gridR-1){
if( az<0 ){
a1 = 1;
a2 = 1;
a3 = ar + (az+1) * gridR + 1;
a4 = 1;
b1 = 0;
b2 = 0;
b3 = 1;
b4 = 0;
}
else if (az>=gridZ-1){
a1 = ar + az * gridR + 1;
a2 = 1;
a3 = 1;
a4 = 1;
b1 = 1;
b2 = 0;
b3 = 0;
b4 = 0;
}
else{
a1 = ar + az * gridR + 1;
a2 = 1;
a3 = ar + (az+1) * gridR + 1;
a4 = 1;
b1 = ((az+1.5)*dz-z0)/(dz);
b2 = 0;
b3 = (z0-(az+0.5)*dz)/(dz);
b4 = 0;
}
}
else{
if( az<0 ){
a1 = 1;
a2 = 1;
a3 = ar + (az+1) * gridR + 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = 0;
b2 = 0;
b3 = ((ar+1.5)*dr-r0)/(dr);
b4 = (r0-(ar+0.5)*dr)/(dr);
}
else if (az>=gridZ-1){
a1 = ar + az * gridR + 1;
a2 = (ar+1) + az * gridR + 1;
a3 = 1;
a4 = 1;
b1 = ((ar+1.5)*dr-r0)/(dr);
b2 = (r0-(ar+0.5)*dr)/(dr);
b3 = 0;
b4 = 0;
}
else{
a1 = ar + az * gridR + 1;
a2 = (ar+1) + az * gridR + 1;
a3 = ar + (az+1) * gridR + 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = ((ar+1.5)*dr-r0)*((az+1.5)*dz-z0)/(dr*dz);
b2 = (r0-(ar+0.5)*dr)*((az+1.5)*dz-z0)/(dr*dz);
b3 = ((ar+1.5)*dr-r0)*(z0-(az+0.5)*dz)/(dr*dz);
b4 = (r0-(ar+0.5)*dr)*(z0-(az+0.5)*dz)/(dr*dz);
}
}
if (parDelete[n]==1){
a1 = 1;
a2 = 1;
a3 = 1;
a4 = 1;
b1 = 0;
b2 = 0;
b3 = 0;
b4 = 0;
}
a[n] = a1;
a[n+parNum] = a2;
a[n+2*parNum] = a3;
a[n+3*parNum] = a4;
b[n] = b1;
b[n+parNum] = b2;
b[n+2*parNum] = b3;
b[n+3*parNum] = b4;
}
__global__ void processMandelbrotElement(
double *r,
double *z,
double * a,
double * b,
int * parDelete,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz) {
EM1(r, z, a, b, parDelete, parNum, gridR, gridZ, dr, dz);
}
| d6fa59016e4bfe275ed1430644ac5208bef4e079.cu | //nvcc -ptx EM6.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
__device__ void EM1( double *r,
double *z,
double * a,
double * b,
int * parDelete,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
double r0 = r[n];
double z0 = z[n];
int ar,az,a1,a2,a3,a4;
double b1,b2,b3,b4;
ar = floor(r0/dr-0.5);
az = floor(z0/dz-0.5);
if (ar<0){
if (az<0){
a1 = 1;
a2 = 1;
a3 = 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = 0;
b2 = 0;
b3 = 0;
b4 = 1;
}
else if (az>=gridZ-1){
a1 = 1;
a2 = (ar+1) + az * gridR + 1;
a3 = 1;
a4 = 1;
b1 = 0;
b2 = 1;
b3 = 0;
b4 = 0;
}
else{
a1 = 1;
a2 = (ar+1) + az * gridR + 1;
a3 = 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = 0;
b2 = ((az+1.5)*dz-z0)/(dz);
b3 = 0;
b4 = (z0-(az+0.5)*dz)/(dz);
}
}
else if (ar>=gridR-1){
if( az<0 ){
a1 = 1;
a2 = 1;
a3 = ar + (az+1) * gridR + 1;
a4 = 1;
b1 = 0;
b2 = 0;
b3 = 1;
b4 = 0;
}
else if (az>=gridZ-1){
a1 = ar + az * gridR + 1;
a2 = 1;
a3 = 1;
a4 = 1;
b1 = 1;
b2 = 0;
b3 = 0;
b4 = 0;
}
else{
a1 = ar + az * gridR + 1;
a2 = 1;
a3 = ar + (az+1) * gridR + 1;
a4 = 1;
b1 = ((az+1.5)*dz-z0)/(dz);
b2 = 0;
b3 = (z0-(az+0.5)*dz)/(dz);
b4 = 0;
}
}
else{
if( az<0 ){
a1 = 1;
a2 = 1;
a3 = ar + (az+1) * gridR + 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = 0;
b2 = 0;
b3 = ((ar+1.5)*dr-r0)/(dr);
b4 = (r0-(ar+0.5)*dr)/(dr);
}
else if (az>=gridZ-1){
a1 = ar + az * gridR + 1;
a2 = (ar+1) + az * gridR + 1;
a3 = 1;
a4 = 1;
b1 = ((ar+1.5)*dr-r0)/(dr);
b2 = (r0-(ar+0.5)*dr)/(dr);
b3 = 0;
b4 = 0;
}
else{
a1 = ar + az * gridR + 1;
a2 = (ar+1) + az * gridR + 1;
a3 = ar + (az+1) * gridR + 1;
a4 = (ar+1) + (az+1) * gridR + 1;
b1 = ((ar+1.5)*dr-r0)*((az+1.5)*dz-z0)/(dr*dz);
b2 = (r0-(ar+0.5)*dr)*((az+1.5)*dz-z0)/(dr*dz);
b3 = ((ar+1.5)*dr-r0)*(z0-(az+0.5)*dz)/(dr*dz);
b4 = (r0-(ar+0.5)*dr)*(z0-(az+0.5)*dz)/(dr*dz);
}
}
if (parDelete[n]==1){
a1 = 1;
a2 = 1;
a3 = 1;
a4 = 1;
b1 = 0;
b2 = 0;
b3 = 0;
b4 = 0;
}
a[n] = a1;
a[n+parNum] = a2;
a[n+2*parNum] = a3;
a[n+3*parNum] = a4;
b[n] = b1;
b[n+parNum] = b2;
b[n+2*parNum] = b3;
b[n+3*parNum] = b4;
}
__global__ void processMandelbrotElement(
double *r,
double *z,
double * a,
double * b,
int * parDelete,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz) {
EM1(r, z, a, b, parDelete, parNum, gridR, gridZ, dr, dz);
}
|
33b3bdd2a0713d55a4a8336058e535e096c078c0.hip | // !!! This is a file automatically generated by hipify!!!
//standard includes
#include <iostream>
#include <vector>
#include <fstream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <rocblas.h>
#include "CycleTimer.h"
#define BLOCKSIZE 1024
#define SCAN_BLOCK_DIM BLOCKSIZE
#include "exclusiveScan.cu_inl"
//include definition file
#include "neuralNetwork.h"
//#include "/afs/cs/academic/class/15418-s17/public/sw/OpenBLAS/cblas.h"
//#include <openblas/cblas.h>
using namespace std;
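// Helper: computes C = A * B with a single hipblasSgemm call (column-major layout);
// A is m x k, B is k x n, C is m x n, with alpha = 1 and beta = 0.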
void gpu_blas_mmul(hipblasHandle_t &handle, const float *A, const float *B, float *C, const int m, const int k, const int n) {
int lda=m, ldb=k, ldc=m;
const float alf =1;
const float bet =0;
const float *alpha = &alf;
const float *beta =&bet;
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
}
__global__ void
forward_prop_kernel(float *device_output, float *input, float *weights, int num_first, int num_second) {
int linearThreadIndex = threadIdx.x;
int unit = blockIdx.x;
__shared__ float prefixSumInput[BLOCKSIZE];
__shared__ float prefixSumOutput[BLOCKSIZE];
__shared__ float prefixSumScratch[2 * BLOCKSIZE];
if (linearThreadIndex < num_first) {
prefixSumInput[linearThreadIndex] = input[linearThreadIndex] * weights[linearThreadIndex*num_second + unit];
}
__syncthreads();
sharedMemExclusiveScan(linearThreadIndex, prefixSumInput, prefixSumOutput,
prefixSumScratch, BLOCKSIZE);
__syncthreads();
if (linearThreadIndex == 0 && unit < num_second) {
device_output[unit] = 1/(1+exp(-1*prefixSumOutput[num_first]));
// device_output[unit] = prefixSumOutput[num_first];
}
}
// first, second -> input=input+1, nhidden
// first, second -> hidden=hidden+1, noutput
__global__ void
forward_prop_kernel_batch(float *device_output, float *input, float *weights, int num_first, int num_second, int batchSize) {
int linearThreadIndex = threadIdx.x;
// PRINT LINEAR THREAD INDEX TO DEBUG
int unit = blockIdx.x%num_second;
int batch = blockIdx.x/num_second;
__shared__ float prefixSumInput[BLOCKSIZE];
__shared__ float prefixSumOutput[BLOCKSIZE];
__shared__ float prefixSumScratch[2 * BLOCKSIZE];
if (linearThreadIndex < num_first) {
prefixSumInput[linearThreadIndex] = input[batch*linearThreadIndex] * weights[linearThreadIndex*num_second + unit];
}
__syncthreads();
sharedMemExclusiveScan(linearThreadIndex, prefixSumInput, prefixSumOutput,
prefixSumScratch, BLOCKSIZE);
__syncthreads();
if (linearThreadIndex == 0 && unit < num_second) {
device_output[batch*unit] = 1/(1+exp(-1*prefixSumOutput[num_first]));
}
}
/*******************************************************************
* Constructor
********************************************************************/
neuralNetwork::neuralNetwork(int nI, int nH, int nO, int bS) : nInput(nI), nHidden(nH), nOutput(nO), batchSize(bS)
{
//create neuron lists
//--------------------------------------------------------------------------------------------------------
inputNeurons = new( float[batchSize*(nInput + 1)] );
for (int b= 0; b<batchSize; b++) {
for (int i=0; i<nInput+1; i++) {
if (i==nInput) {
inputNeurons[(b+1)*(nInput)] = -1;
}
else {
inputNeurons[b*(nInput+1) + i] = 0;
}
}
}
//create input bias neuron
// inputNeurons[nInput] = -1;
hiddenNeurons = new( float[batchSize*(nHidden + 1)] );
for (int b=0; b<batchSize; b++) {
for (int i=0; i<nHidden+1; i++) {
if (i==nHidden) {
hiddenNeurons[(b+1)*(nHidden)] = -1;
}
else {
hiddenNeurons[b*(nHidden+1) + i] = 0;
}
}
}
// for ( int i=0; i < nHidden; i++ ) hiddenNeurons[i] = 0;
//create hidden bias neuron
// hiddenNeurons[nHidden] = -1;
// outputNeurons = new( float[nOutput] );
outputNeurons = new( float[batchSize*(nOutput + 1)] );
for ( int i=0; i < batchSize*(nOutput+1); i++ ) {
outputNeurons[i] = 0;
}
// for ( int i=0; i < nOutput; i++ ) outputNeurons[i] = 0;
//create weight lists (include bias neuron weights)
//--------------------------------------------------------------------------------------------------------
wInputHidden = new( float*[nInput + 1] );
wInputHidden[0] = new (float[(nInput + 1)*nHidden]);
for ( int i=1; i <= nInput; i++ ) {
wInputHidden[i] = wInputHidden[i-1] + nHidden;
}
for ( int i=0; i <= nInput; i++ )
{
for ( int j=0; j < nHidden; j++ ) wInputHidden[i][j] = 0;
}
wHiddenOutput = new( float*[nHidden + 1] );
wHiddenOutput[0] = new (float[(nHidden + 1)*nOutput]);
for ( int i=1; i <= nHidden; i++ ) {
wHiddenOutput[i] = wHiddenOutput[i-1] + nOutput;
}
for ( int i=0; i <= nHidden; i++ )
{
for ( int j=0; j < nOutput; j++ ) wHiddenOutput[i][j] = 0;
}
//initialize weights
//--------------------------------------------------------------------------------------------------------
initializeWeights();
}
/*******************************************************************
* Destructor
********************************************************************/
neuralNetwork::~neuralNetwork()
{
//delete neurons
delete[] inputNeurons;
delete[] hiddenNeurons;
delete[] outputNeurons;
//delete weight storage
for (int i=0; i <= nInput; i++) delete[] wInputHidden[i];
delete[] wInputHidden;
for (int j=0; j <= nHidden; j++) delete[] wHiddenOutput[j];
delete[] wHiddenOutput;
hipFree(device_output1);
hipFree(input);
hipFree(w1);
hipFree(device_output2);
hipFree(hidden);
hipFree(w2);
hipblasDestroy(handle);
}
/*******************************************************************
* Save Neuron Weights
*******************************************************************/
bool neuralNetwork::saveWeights(char* filename)
{
//open file for reading
fstream outputFile;
outputFile.open(filename, ios::out);
if ( outputFile.is_open() )
{
outputFile.precision(50);
//output weights
for ( int i=0; i <= nInput; i++ )
{
for ( int j=0; j < nHidden; j++ )
{
outputFile << wInputHidden[i][j] << ",";
}
}
for ( int i=0; i <= nHidden; i++ )
{
for ( int j=0; j < nOutput; j++ )
{
outputFile << wHiddenOutput[i][j];
if ( i * nOutput + j + 1 != (nHidden + 1) * nOutput ) outputFile << ",";
}
}
//print success
cout << endl << "Neuron weights saved to '" << filename << "'" << endl;
//close file
outputFile.close();
return true;
}
else
{
cout << endl << "Error - Weight output file '" << filename << "' could not be created: " << endl;
return false;
}
}
/*******************************************************************
* Return the NN accuracy on the set
********************************************************************/
double neuralNetwork::getSetAccuracy( std::vector<dataEntry*>& set )
{
double incorrectResults = 0;
//for every training input array
for ( int tp = 0; tp < (int) set.size(); tp++)
{
//feed inputs through network and backpropagate errors
feedForward( set[tp]->pattern );
int predicted = distance(outputNeurons, max_element(outputNeurons, outputNeurons + nOutput));
int expected = distance(set[tp]->target, max_element(set[tp]->target, set[tp]->target + nOutput));
if (predicted != expected) incorrectResults++;
}//end for
//calculate error and return as percentage
return 100 - (incorrectResults/set.size() * 100);
}
/*******************************************************************
* Initialize Neuron Weights
********************************************************************/
void neuralNetwork::initializeWeights()
{
double startTime = CycleTimer::currentSeconds();
hipblasCreate(&handle);
hipMalloc(&device_output1, sizeof(float) * batchSize*nHidden);
hipMalloc(&input, sizeof(float) * batchSize*(nInput+1));
hipMalloc(&w1, sizeof(float) * (nInput+1)*nHidden);
hipMalloc(&device_output2, sizeof(float) * batchSize*nOutput);
hipMalloc(&hidden, sizeof(float) * batchSize*(nHidden+1));
hipMalloc(&w2, sizeof(float) * (nHidden+1)*nOutput);
//set weights between input and hidden
//--------------------------------------------------------------------------------------------------------
for(int i = 0; i <= nInput; i++)
{
for(int j = 0; j < nHidden; j++)
{
//set weights to random values
wInputHidden[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05);
}
}
//set weights between input and hidden
//--------------------------------------------------------------------------------------------------------
for(int i = 0; i <= nHidden; i++)
{
for(int j = 0; j < nOutput; j++)
{
//set weights to random values
wHiddenOutput[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05);
}
}
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
printf("Time Taken Seq:%f\n", overallDuration);
}
/*******************************************************************
* Activation Function
********************************************************************/
inline float neuralNetwork::activationFunction( float x )
{
//sigmoid function
return 1/(1+exp(-x));
}
void neuralNetwork::feedForwardBatch(vector<float*> patternVector) {
for (int b = 0; b<batchSize; b++) {
for(int i = 0; i < nInput+1; i++) {
if (i!=nInput) {
inputNeurons[b*(nInput+1) + i] = patternVector[b][i];
}
}
}
dim3 blockDim(1024, 1);
dim3 gridDim(1024);//((1024*1024) + blockDim.x - 1) / blockDim.x);
hipMemcpy(input, inputNeurons, sizeof(float) * batchSize*(nInput+1), hipMemcpyHostToDevice);
hipMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( forward_prop_kernel_batch), dim3(gridDim), dim3(blockDim), 0, 0, device_output1, input, w1, nInput+1, nHidden, batchSize);
hipDeviceSynchronize();
hipMemcpy(hiddenNeurons, device_output1, batchSize*nHidden*sizeof(float), hipMemcpyDeviceToHost);
//w2 part
dim3 gridDim2(nOutput*batchSize);//((1024*1024) + blockDim.x - 1) / blockDim.x);
hipMemcpy(hidden, hiddenNeurons, sizeof(float) * batchSize*(nHidden+1), hipMemcpyHostToDevice);
hipMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( forward_prop_kernel_batch), dim3(gridDim2), dim3(blockDim), 0, 0, device_output2, hidden, w2, nHidden+1, nOutput,batchSize);
hipDeviceSynchronize();
hipMemcpy(outputNeurons, device_output2, batchSize*nOutput*sizeof(float), hipMemcpyDeviceToHost);
}
/*******************************************************************
* Feed Forward Operation
********************************************************************/
void neuralNetwork::feedForward(float* pattern)
{
//set input neurons to input values
for(int i = 0; i < nInput; i++) {
inputNeurons[i] = pattern[i];
}
// double startTime = CycleTimer::currentSeconds();
dim3 blockDim(1024, 1);
dim3 gridDim(nHidden);//((1024*1024) + blockDim.x - 1) / blockDim.x);
hipMemcpy(input, inputNeurons, sizeof(float) * (nInput+1), hipMemcpyHostToDevice);
//double endTime1 = CycleTimer::currentSeconds();
hipMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), hipMemcpyHostToDevice);
// double endTime2 = CycleTimer::currentSeconds();
// forward_prop_kernel<<<gridDim, blockDim>>>(device_output1, input, w1, nInput+1, nHidden);
// hipDeviceSynchronize();
// // double endTime3 = CycleTimer::currentSeconds();
// hipMemcpy(hiddenNeurons, device_output1, nHidden*sizeof(float), hipMemcpyDeviceToHost);
// // double endTime4 = CycleTimer::currentSeconds();
gpu_blas_mmul(handle, input, w1, device_output1, 1, nInput+1, nHidden);
hipMemcpy(hiddenNeurons, device_output1, nHidden*sizeof(float), hipMemcpyDeviceToHost);
/*float alpha = 1.0;
float beta = 0.0;
float* tempWeights = new float[(nInput+1)*nHidden];
for (int i=0; i<nInput +1; i++) {
for (int j=0; j<nHidden; j++) {
tempWeights[i*nHidden + j] = wInputHidden[i][j];
}
}
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, 1, nHidden, nInput+1, alpha, inputNeurons, nInput+1, tempWeights, nHidden, beta, hiddenNeurons, nHidden);*/
// double time1 = endTime1 - startTime;
// double time2 = endTime2 - endTime1;
// double time3 = endTime3 - endTime2;
// double time4 = endTime4 - endTime3;
// printf("Time 1:%f\n", time1);
// printf("Time 2:%f\n", time2);
// printf("Time 3:%f\n", time3);
// printf("Time 4:%f\n", time4);
//Calculate Hidden Layer values - include bias neuron
//--------------------------------------------------------------------------------------------------------
#pragma omp parallel
{
#pragma omp for
for (int j = 0; j<nHidden; j++) {
hiddenNeurons[j] = activationFunction( hiddenNeurons[j] );
}
/*
float temp = 0.0;
// #pragma omp for //schedule(static, 16)
for(int j=0; j < nHidden; j++)
{
temp = 0.0;
//clear value
hiddenNeurons[j] = 0;
//get weighted sum of pattern and bias neuron
// #pragma omp parallel for reduction(+ : temp)
for( int i=0; i <= nInput; i++ ) {
temp += inputNeurons[i] * wInputHidden[i][j];
}
// cout << "temp: " << hiddenNeurons[j] << endl;
//set to result of sigmoid
hiddenNeurons[j] = activationFunction( temp );
// cout << "output: " << hiddenNeurons[j] << endl;
}
// double endTime1 = CycleTimer::currentSeconds();
// printf("Time:%f\n", endTime1 - startTime);
//Calculating Output Layer values - include bias neuron
//--------------------------------------------------------------------------------------------------------
// #pragma omp for //schedule(static, 16)//reduction(+ : temp)
for(int k=0; k < nOutput; k++)
{
temp = 0.0;
//clear value
outputNeurons[k] = 0;
//get weighted sum of pattern and bias neuron
// #pragma omp for //reduction(+ : temp)
for( int j=0; j <= nHidden; j++ ) {
temp += hiddenNeurons[j] * wHiddenOutput[j][k];
}
//set to result of sigmoid
outputNeurons[k] = activationFunction( temp );
}*/
}
dim3 gridDim2(nOutput);//((1024*1024) + blockDim.x - 1) / blockDim.x);
hipMemcpy(hidden, hiddenNeurons, sizeof(float) * (nHidden+1), hipMemcpyHostToDevice);
// double endTime1 = CycleTimer::currentSeconds();
hipMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), hipMemcpyHostToDevice);
// double endTime2 = CycleTimer::currentSeconds();
// forward_prop_kernel<<<gridDim2, blockDim>>>(device_output2, hidden, w2, nHidden+1, nOutput);
// hipDeviceSynchronize();
// // double endTime3 = CycleTimer::currentSeconds();
// hipMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), hipMemcpyDeviceToHost);
// double endTime4 = CycleTimer::currentSeconds();
gpu_blas_mmul(handle, hidden, w2, device_output2, 1, nHidden+1, nOutput);
hipMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), hipMemcpyDeviceToHost);
#pragma omp parallel
{
#pragma omp for
for (int k = 0; k<nOutput; k++) {
outputNeurons[k] = activationFunction( outputNeurons[k] );
}
}
// double endTime3 = CycleTimer::currentSeconds();
// double time = endTime3 - startTime;
// cout << "Forward = " << time << endl;
}
void neuralNetwork::printCudaInfo()
{
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++)
{
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| 33b3bdd2a0713d55a4a8336058e535e096c078c0.cu | //standard includes
#include <iostream>
#include <vector>
#include <fstream>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cublas_v2.h>
#include "CycleTimer.h"
#define BLOCKSIZE 1024
#define SCAN_BLOCK_DIM BLOCKSIZE
#include "exclusiveScan.cu_inl"
//include definition file
#include "neuralNetwork.h"
//#include "/afs/cs/academic/class/15418-s17/public/sw/OpenBLAS/cblas.h"
//#include <openblas/cblas.h>
using namespace std;
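// Helper: computes C = A * B with a single cublasSgemm call (column-major layout);
// A is m x k, B is k x n, C is m x n, with alpha = 1 and beta = 0.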
void gpu_blas_mmul(cublasHandle_t &handle, const float *A, const float *B, float *C, const int m, const int k, const int n) {
int lda=m, ldb=k, ldc=m;
const float alf =1;
const float bet =0;
const float *alpha = &alf;
const float *beta =&bet;
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
}
__global__ void
forward_prop_kernel(float *device_output, float *input, float *weights, int num_first, int num_second) {
int linearThreadIndex = threadIdx.x;
int unit = blockIdx.x;
__shared__ float prefixSumInput[BLOCKSIZE];
__shared__ float prefixSumOutput[BLOCKSIZE];
__shared__ float prefixSumScratch[2 * BLOCKSIZE];
if (linearThreadIndex < num_first) {
prefixSumInput[linearThreadIndex] = input[linearThreadIndex] * weights[linearThreadIndex*num_second + unit];
}
__syncthreads();
sharedMemExclusiveScan(linearThreadIndex, prefixSumInput, prefixSumOutput,
prefixSumScratch, BLOCKSIZE);
__syncthreads();
if (linearThreadIndex == 0 && unit < num_second) {
device_output[unit] = 1/(1+exp(-1*prefixSumOutput[num_first]));
// device_output[unit] = prefixSumOutput[num_first];
}
}
// first, second -> input=input+1, nhidden
// first, second -> hidden=hidden+1, noutput
__global__ void
forward_prop_kernel_batch(float *device_output, float *input, float *weights, int num_first, int num_second, int batchSize) {
int linearThreadIndex = threadIdx.x;
// PRINT LINEAR THREAD INDEX TO DEBUG
int unit = blockIdx.x%num_second;
int batch = blockIdx.x/num_second;
__shared__ float prefixSumInput[BLOCKSIZE];
__shared__ float prefixSumOutput[BLOCKSIZE];
__shared__ float prefixSumScratch[2 * BLOCKSIZE];
if (linearThreadIndex < num_first) {
prefixSumInput[linearThreadIndex] = input[batch*linearThreadIndex] * weights[linearThreadIndex*num_second + unit];
}
__syncthreads();
sharedMemExclusiveScan(linearThreadIndex, prefixSumInput, prefixSumOutput,
prefixSumScratch, BLOCKSIZE);
__syncthreads();
if (linearThreadIndex == 0 && unit < num_second) {
device_output[batch*unit] = 1/(1+exp(-1*prefixSumOutput[num_first]));
}
}
/*******************************************************************
* Constructor
********************************************************************/
neuralNetwork::neuralNetwork(int nI, int nH, int nO, int bS) : nInput(nI), nHidden(nH), nOutput(nO), batchSize(bS)
{
//create neuron lists
//--------------------------------------------------------------------------------------------------------
inputNeurons = new( float[batchSize*(nInput + 1)] );
for (int b= 0; b<batchSize; b++) {
for (int i=0; i<nInput+1; i++) {
if (i==nInput) {
inputNeurons[(b+1)*(nInput)] = -1;
}
else {
inputNeurons[b*(nInput+1) + i] = 0;
}
}
}
//create input bias neuron
// inputNeurons[nInput] = -1;
hiddenNeurons = new( float[batchSize*(nHidden + 1)] );
for (int b=0; b<batchSize; b++) {
for (int i=0; i<nHidden+1; i++) {
if (i==nHidden) {
hiddenNeurons[(b+1)*(nHidden)] = -1;
}
else {
hiddenNeurons[b*(nHidden+1) + i] = 0;
}
}
}
// for ( int i=0; i < nHidden; i++ ) hiddenNeurons[i] = 0;
//create hidden bias neuron
// hiddenNeurons[nHidden] = -1;
// outputNeurons = new( float[nOutput] );
outputNeurons = new( float[batchSize*(nOutput + 1)] );
for ( int i=0; i < batchSize*(nOutput+1); i++ ) {
outputNeurons[i] = 0;
}
// for ( int i=0; i < nOutput; i++ ) outputNeurons[i] = 0;
//create weight lists (include bias neuron weights)
//--------------------------------------------------------------------------------------------------------
wInputHidden = new( float*[nInput + 1] );
wInputHidden[0] = new (float[(nInput + 1)*nHidden]);
for ( int i=1; i <= nInput; i++ ) {
wInputHidden[i] = wInputHidden[i-1] + nHidden;
}
for ( int i=0; i <= nInput; i++ )
{
for ( int j=0; j < nHidden; j++ ) wInputHidden[i][j] = 0;
}
wHiddenOutput = new( float*[nHidden + 1] );
wHiddenOutput[0] = new (float[(nHidden + 1)*nOutput]);
for ( int i=1; i <= nHidden; i++ ) {
wHiddenOutput[i] = wHiddenOutput[i-1] + nOutput;
}
for ( int i=0; i <= nHidden; i++ )
{
for ( int j=0; j < nOutput; j++ ) wHiddenOutput[i][j] = 0;
}
//initialize weights
//--------------------------------------------------------------------------------------------------------
initializeWeights();
}
/*******************************************************************
* Destructor
********************************************************************/
neuralNetwork::~neuralNetwork()
{
//delete neurons
delete[] inputNeurons;
delete[] hiddenNeurons;
delete[] outputNeurons;
//delete weight storage
    // only row 0 of each weight table owns the contiguous block; the other row
    // pointers alias into it, so free just the block and the pointer array
    delete[] wInputHidden[0];
    delete[] wInputHidden;
    delete[] wHiddenOutput[0];
    delete[] wHiddenOutput;
cudaFree(device_output1);
cudaFree(input);
cudaFree(w1);
cudaFree(device_output2);
cudaFree(hidden);
cudaFree(w2);
cublasDestroy(handle);
}
/*******************************************************************
* Save Neuron Weights
*******************************************************************/
bool neuralNetwork::saveWeights(char* filename)
{
    //open file for writing
fstream outputFile;
outputFile.open(filename, ios::out);
if ( outputFile.is_open() )
{
outputFile.precision(50);
//output weights
for ( int i=0; i <= nInput; i++ )
{
for ( int j=0; j < nHidden; j++ )
{
outputFile << wInputHidden[i][j] << ",";
}
}
for ( int i=0; i <= nHidden; i++ )
{
for ( int j=0; j < nOutput; j++ )
{
outputFile << wHiddenOutput[i][j];
if ( i * nOutput + j + 1 != (nHidden + 1) * nOutput ) outputFile << ",";
}
}
//print success
cout << endl << "Neuron weights saved to '" << filename << "'" << endl;
//close file
outputFile.close();
return true;
}
else
{
cout << endl << "Error - Weight output file '" << filename << "' could not be created: " << endl;
return false;
}
}
/*******************************************************************
* Return the NN accuracy on the set
********************************************************************/
double neuralNetwork::getSetAccuracy( std::vector<dataEntry*>& set )
{
double incorrectResults = 0;
//for every training input array
for ( int tp = 0; tp < (int) set.size(); tp++)
{
        //feed inputs through the network and compare the predicted class with the target
feedForward( set[tp]->pattern );
int predicted = distance(outputNeurons, max_element(outputNeurons, outputNeurons + nOutput));
int expected = distance(set[tp]->target, max_element(set[tp]->target, set[tp]->target + nOutput));
if (predicted != expected) incorrectResults++;
}//end for
//calculate error and return as percentage
return 100 - (incorrectResults/set.size() * 100);
}
/*******************************************************************
* Initialize Neuron Weights
********************************************************************/
void neuralNetwork::initializeWeights()
{
double startTime = CycleTimer::currentSeconds();
cublasCreate(&handle);
cudaMalloc(&device_output1, sizeof(float) * batchSize*nHidden);
cudaMalloc(&input, sizeof(float) * batchSize*(nInput+1));
cudaMalloc(&w1, sizeof(float) * (nInput+1)*nHidden);
cudaMalloc(&device_output2, sizeof(float) * batchSize*nOutput);
cudaMalloc(&hidden, sizeof(float) * batchSize*(nHidden+1));
cudaMalloc(&w2, sizeof(float) * (nHidden+1)*nOutput);
//set weights between input and hidden
//--------------------------------------------------------------------------------------------------------
for(int i = 0; i <= nInput; i++)
{
for(int j = 0; j < nHidden; j++)
{
//set weights to random values
wInputHidden[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05);
}
}
    //set weights between hidden and output
//--------------------------------------------------------------------------------------------------------
for(int i = 0; i <= nHidden; i++)
{
for(int j = 0; j < nOutput; j++)
{
//set weights to random values
wHiddenOutput[i][j] = ( (( (float)(rand()%1000)+1)/1000)/10 - 0.05);
}
}
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
printf("Time Taken Seq:%f\n", overallDuration);
}
/*******************************************************************
* Activation Function
********************************************************************/
inline float neuralNetwork::activationFunction( float x )
{
//sigmoid function
return 1/(1+exp(-x));
}
void neuralNetwork::feedForwardBatch(vector<float*> patternVector) {
for (int b = 0; b<batchSize; b++) {
for(int i = 0; i < nInput+1; i++) {
if (i!=nInput) {
inputNeurons[b*(nInput+1) + i] = patternVector[b][i];
}
}
}
dim3 blockDim(1024, 1);
    dim3 gridDim(nHidden*batchSize); // one block per (batch, hidden unit), as the batched kernel expects
cudaMemcpy(input, inputNeurons, sizeof(float) * batchSize*(nInput+1), cudaMemcpyHostToDevice);
cudaMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), cudaMemcpyHostToDevice);
forward_prop_kernel_batch<<<gridDim, blockDim>>>(device_output1, input, w1, nInput+1, nHidden, batchSize);
cudaThreadSynchronize();
cudaMemcpy(hiddenNeurons, device_output1, batchSize*nHidden*sizeof(float), cudaMemcpyDeviceToHost);
//w2 part
dim3 gridDim2(nOutput*batchSize);//((1024*1024) + blockDim.x - 1) / blockDim.x);
cudaMemcpy(hidden, hiddenNeurons, sizeof(float) * batchSize*(nHidden+1), cudaMemcpyHostToDevice);
cudaMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), cudaMemcpyHostToDevice);
forward_prop_kernel_batch<<<gridDim2, blockDim>>>(device_output2, hidden, w2, nHidden+1, nOutput,batchSize);
cudaThreadSynchronize();
cudaMemcpy(outputNeurons, device_output2, batchSize*nOutput*sizeof(float), cudaMemcpyDeviceToHost);
}
/*******************************************************************
* Feed Forward Operation
********************************************************************/
void neuralNetwork::feedForward(float* pattern)
{
//set input neurons to input values
for(int i = 0; i < nInput; i++) {
inputNeurons[i] = pattern[i];
}
// double startTime = CycleTimer::currentSeconds();
dim3 blockDim(1024, 1);
dim3 gridDim(nHidden);//((1024*1024) + blockDim.x - 1) / blockDim.x);
cudaMemcpy(input, inputNeurons, sizeof(float) * (nInput+1), cudaMemcpyHostToDevice);
//double endTime1 = CycleTimer::currentSeconds();
cudaMemcpy(w1, wInputHidden[0], (nInput+1)*nHidden*sizeof(float), cudaMemcpyHostToDevice);
// double endTime2 = CycleTimer::currentSeconds();
// forward_prop_kernel<<<gridDim, blockDim>>>(device_output1, input, w1, nInput+1, nHidden);
// cudaThreadSynchronize();
// // double endTime3 = CycleTimer::currentSeconds();
// cudaMemcpy(hiddenNeurons, device_output1, nHidden*sizeof(float), cudaMemcpyDeviceToHost);
// // double endTime4 = CycleTimer::currentSeconds();
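    // gpu_blas_mmul (presumably a cuBLAS SGEMM helper defined earlier in this file)
    // multiplies the 1 x (nInput+1) input row by the (nInput+1) x nHidden weight matrix
    // on the device, leaving the pre-activation hidden values in device_output1.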
gpu_blas_mmul(handle, input, w1, device_output1, 1, nInput+1, nHidden);
cudaMemcpy(hiddenNeurons, device_output1, nHidden*sizeof(float), cudaMemcpyDeviceToHost);
/*float alpha = 1.0;
float beta = 0.0;
float* tempWeights = new float[(nInput+1)*nHidden];
for (int i=0; i<nInput +1; i++) {
for (int j=0; j<nHidden; j++) {
tempWeights[i*nHidden + j] = wInputHidden[i][j];
}
}
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, 1, nHidden, nInput+1, alpha, inputNeurons, nInput+1, tempWeights, nHidden, beta, hiddenNeurons, nHidden);*/
// double time1 = endTime1 - startTime;
// double time2 = endTime2 - endTime1;
// double time3 = endTime3 - endTime2;
// double time4 = endTime4 - endTime3;
// printf("Time 1:%f\n", time1);
// printf("Time 2:%f\n", time2);
// printf("Time 3:%f\n", time3);
// printf("Time 4:%f\n", time4);
//Calculate Hidden Layer values - include bias neuron
//--------------------------------------------------------------------------------------------------------
#pragma omp parallel
{
#pragma omp for
for (int j = 0; j<nHidden; j++) {
hiddenNeurons[j] = activationFunction( hiddenNeurons[j] );
}
/*
float temp = 0.0;
// #pragma omp for //schedule(static, 16)
for(int j=0; j < nHidden; j++)
{
temp = 0.0;
//clear value
hiddenNeurons[j] = 0;
//get weighted sum of pattern and bias neuron
// #pragma omp parallel for reduction(+ : temp)
for( int i=0; i <= nInput; i++ ) {
temp += inputNeurons[i] * wInputHidden[i][j];
}
// cout << "temp: " << hiddenNeurons[j] << endl;
//set to result of sigmoid
hiddenNeurons[j] = activationFunction( temp );
// cout << "output: " << hiddenNeurons[j] << endl;
}
// double endTime1 = CycleTimer::currentSeconds();
// printf("Time:%f\n", endTime1 - startTime);
//Calculating Output Layer values - include bias neuron
//--------------------------------------------------------------------------------------------------------
// #pragma omp for //schedule(static, 16)//reduction(+ : temp)
for(int k=0; k < nOutput; k++)
{
temp = 0.0;
//clear value
outputNeurons[k] = 0;
//get weighted sum of pattern and bias neuron
// #pragma omp for //reduction(+ : temp)
for( int j=0; j <= nHidden; j++ ) {
temp += hiddenNeurons[j] * wHiddenOutput[j][k];
}
//set to result of sigmoid
outputNeurons[k] = activationFunction( temp );
}*/
}
dim3 gridDim2(nOutput);//((1024*1024) + blockDim.x - 1) / blockDim.x);
cudaMemcpy(hidden, hiddenNeurons, sizeof(float) * (nHidden+1), cudaMemcpyHostToDevice);
// double endTime1 = CycleTimer::currentSeconds();
cudaMemcpy(w2, wHiddenOutput[0], (nHidden+1)*nOutput*sizeof(float), cudaMemcpyHostToDevice);
// double endTime2 = CycleTimer::currentSeconds();
// forward_prop_kernel<<<gridDim2, blockDim>>>(device_output2, hidden, w2, nHidden+1, nOutput);
// cudaThreadSynchronize();
// // double endTime3 = CycleTimer::currentSeconds();
// cudaMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), cudaMemcpyDeviceToHost);
// double endTime4 = CycleTimer::currentSeconds();
gpu_blas_mmul(handle, hidden, w2, device_output2, 1, nHidden+1, nOutput);
cudaMemcpy(outputNeurons, device_output2, nOutput*sizeof(float), cudaMemcpyDeviceToHost);
#pragma omp parallel
{
#pragma omp for
for (int k = 0; k<nOutput; k++) {
        outputNeurons[k] = activationFunction( outputNeurons[k] );
}
}
// double endTime3 = CycleTimer::currentSeconds();
// double time = endTime3 - startTime;
// cout << "Forward = " << time << endl;
}
void neuralNetwork::printCudaInfo()
{
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++)
{
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
b69f866b16b6dd66a4699cd886984e75566e4c0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <hip/hip_runtime.h>
// #include "mttkrp_mpi.h"
#include "mttkrp_cpu.h"
#include "mttkrp_gpu.h"
#include "cpd_cpu.h"
#include <bits/stdc++.h>
using namespace std;
int main(int argc, char* argv[]){
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
Options Opt = parse_cmd_options(argc, argv);
Tensor X;
load_tensor(X, Opt);
sort_COOtensor(X);
check_opt(X, Opt); //check options are good
// MPI_param MPIparam;
TiledTensor TiledX[Opt.nTile];
Matrix *U = new Matrix[X.ndims];
create_mats(X, U, Opt, false);
randomize_mats(X, U, Opt);
// if(Opt.impType != 12 && Opt.impType != 14 )
// zero_mat(X, U, Opt.mode); // not sure about the cpu code
if(Opt.verbose)
cout << endl << "Starting MTTKRP..." << endl;
// print tensors and statistics
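    // Opt.impType selects the MTTKRP implementation used below:
    //   0: tensor statistics only        1: COO on CPU          2: HCSR on CPU
    //   3: COO on GPU                    4: HCSR on GPU         5-9: tiled / B-CSF variants
    //   10: HYB (GPU)                    11-12: MI-CSF (CPU / GPU)
    //   13-14: same CSF for all modes (CPU / tiled GPU)         otherwise: no MTTKRP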
if(Opt.impType == 0){
double t0 = seconds();
// print_COOtensor(X);
create_HCSR(X, Opt);
get_nnzPerFiberData(X);
tensor_stats(X);
// ((X.ndims == 3) ? print_HCSRtensor(X) : print_HCSRtensor_4D(X));
}
// COO CPU
if(Opt.impType == 1){
double t0 = seconds();
((X.ndims == 3) ? MTTKRP_COO_CPU(X, U, Opt) : MTTKRP_COO_CPU_4D(X, U, Opt));
printf("COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR CPU
else if(Opt.impType == 2){
create_HCSR(X, Opt);
int *curMode = new int [X.ndims];
for (int m = 0; m < X.ndims; ++m)
curMode[m] = (m + Opt.mode) % X.ndims;
double t0 = seconds();
((X.ndims == 3) ? MTTKRP_HCSR_CPU(X, TiledX, U, Opt) : MTTKRP_HCSR_CPU_4D(X, U, Opt));
printf("gcc no opt : HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
// COO GPU
else if(Opt.impType == 3){
MTTKRP_COO_GPU(X, U, Opt);
}
// HCSR GPU
else if(Opt.impType == 4){
create_HCSR(X, Opt);
MTTKRP_HCSR_GPU(X, U, Opt);
}
// HYB CPU
else if(Opt.impType == 10){
create_HCSR(X, Opt);
HYBTensor HybX(X);
cout << "Creating HYB... " ;
double t0 = seconds();
((X.ndims == 3) ? create_HYB(HybX, X, Opt) : create_HYB_4D(HybX, X, Opt));
printf("create HYB - time: %.3f sec \n", seconds() - t0);
make_HybBin(HybX, Opt);
// print_HYBtensor(HybX);
// ((X.ndims == 3) ? MTTKRP_HYB_CPU(HybX, U, Opt) : MTTKRP_HYB_CPU_4D(HybX, U, Opt));
MTTKRP_HYB_GPU(HybX, U, Opt);
}
/* Tiled versions */
else if(Opt.impType >= 5 && Opt.impType < 10){
create_HCSR(X, Opt);
int tilingMode = X.modeOrder[X.ndims -1];
Opt.tileSize = (X.dims[tilingMode] + Opt.nTile - 1)/Opt.nTile;
if(Opt.nTile > X.dims[tilingMode]){
            cout << "Number of tiles ("<< Opt.nTile << ") must not exceed K's dimension (" << X.dims[tilingMode] << "). Exiting."<< endl ;
exit(0);
}
// split X into tiles based on K indices
make_KTiling(X, TiledX, Opt);
// create HCSR for each tile
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0){
create_TiledHCSR(TiledX, Opt, tile);
}
// print_TiledHCSRtensor(TiledX, tile);
}
        // Split tiles into bins according to nnz in slice
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0)
make_TiledBin(TiledX, Opt, tile);
}
// COO GPU
if(Opt.impType == 5){
double t0 = seconds();
MTTKRP_TILED_COO_CPU(TiledX, U, Opt);
printf("TILED COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR GPU
else if(Opt.impType == 6){
double t0 = seconds();
((X.ndims == 3) ? MTTKRP_TILED_HCSR_CPU(TiledX, U, Opt) : MTTKRP_TILED_HCSR_CPU_4D(TiledX, U, Opt));
printf("TILED HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
// TILED COO GPU
else if(Opt.impType == 7){
cout << "GPU COO has bugs! " << endl;
MTTKRP_TILED_COO_GPU(TiledX, U, Opt);
}
// TILED HCSR GPU
else if(Opt.impType == 8){
cout << "Sorted mode: " << X.modeOrder[0] << " " << X.modeOrder[1] << " " <<X.modeOrder[2] << endl;
create_fbrLikeSlcInds(TiledX, 0);
create_fbrLikeSlcInds(X, Opt);
MTTKRP_B_HCSR_GPU(TiledX, U, Opt);
}
// TILED + support all mode using same B-CSF
else if(Opt.impType == 9){
// int MTTKRPmode = 0;
for (int MTTKRPmode = 0; MTTKRPmode < X.ndims; ++MTTKRPmode){
randomize_mats(X, U, Opt);
zero_mat(X, U, MTTKRPmode);
MTTKRP_B_HCSR_GPU_ANYMODE(TiledX, U, Opt, MTTKRPmode);
}
}
}
/* single-CSF*/
else if(Opt.impType == 13 || Opt.impType == 14 ){
if(Opt.verbose)
cout << "Starting sameCSF: MTTKRP on all modes using same CSF" << endl;
sort_COOtensor(X);
create_HCSR(X, Opt);
// compute_reuse(X,Opt);
// compute_reuse_distance(X,Opt);
/* on CPU non tiled */
if(Opt.impType == 13){
for (int MTTKRPmode = 0; MTTKRPmode < X.ndims; ++MTTKRPmode) {
randomize_mats(X, U, Opt);
zero_mat(X, U, MTTKRPmode);
// if( MTTKRPmode == Opt.mode){
if( X.modeOrder[0] == MTTKRPmode)
((X.ndims == 3) ? MTTKRP_HCSR_CPU(X, TiledX, U, Opt) : MTTKRP_HCSR_CPU_4D(X, U, Opt));
// MTTKRPmode = (Opt.mode + 1) % X.ndims;
else if( X.modeOrder[1] == MTTKRPmode) {
create_fbrLikeSlcInds(X, Opt);
MTTKRP_HCSR_CPU_mode1(X, U, Opt, MTTKRPmode);
}
// // MTTKRPmode = (Opt.mode + 2) % X.ndims;
else if( X.modeOrder[2] == MTTKRPmode)
MTTKRP_HCSR_CPU_mode2(X, U, Opt, MTTKRPmode);
}
}
/* on GPU tiled (skipping on tiled gpu due to time constraints)*/
if(Opt.impType == 14){
int tilingMode = X.modeOrder[X.ndims -1];
Opt.tileSize = (X.dims[tilingMode] + Opt.nTile - 1)/Opt.nTile;
if(Opt.nTile > X.dims[tilingMode]){
                cout << "Number of tiles ("<< Opt.nTile << ") must not exceed K's dimension (" << X.dims[tilingMode] << "). Exiting."<< endl ;
exit(0);
}
// print_HCSRtensor_4D(X);
// split X into tiles based on K indices
make_KTiling(X, TiledX, Opt);
// create HCSR for each tile
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0){
create_TiledHCSR(TiledX, Opt, tile);
create_fbrLikeSlcInds(TiledX, tile);
// print_COOtensor(X);
// print_TiledHCSRtensor(TiledX, tile);
}
}
// Split tiles into bins accordin to nnz in slice
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0)
make_TiledBin(TiledX, Opt, tile);
}
MTTKRP_ONE_HCSR_GPU(TiledX, U, Opt);
// MTTKRP_B_HCSR_GPU(TiledX, U, Opt);
}
}
/* MI-CSF*/
else if(Opt.impType == 11 || Opt.impType == 12){
double t0 = seconds();
if(Opt.verbose)
cout << "Starting MI-CSF" << endl;
/*Collect slice and fiber stats: Create CSF for all modes*/
bool slcNfbrStats = true;
Tensor *arrX = new Tensor[X.ndims];
if(slcNfbrStats){
for (int m = 0; m < X.ndims; ++m){
init_tensor(arrX, X, Opt, m);
if(m!= Opt.mode) //already sorted
t0 = seconds();
sort_COOtensor(arrX[m]);
// printf("sort - mode %d - %.3f\n", m, seconds() - t0);
t0 = seconds();
create_HCSR(arrX[m], Opt);
// printf("creat CSF - mode %d - %.3f\n", m, seconds() - t0);
// get_nnzPerFiberData(arrX[m]); //merge with createCSF
// create_hashtable(arrX[m]);
// cout << "created Hshtable" << endl;
// print_HCSRtensor(arrX[m]);
}
}
TiledTensor ModeWiseTiledX[X.ndims];
t0 = seconds();
//mm_partition_allMode(arrX, X, ModeWiseTiledX, Opt);
mm_partition_reuseBased(arrX, X, ModeWiseTiledX, Opt);
populate_paritions(X, ModeWiseTiledX);
// printf("mm_partition & populate - time: %.3f sec \n", seconds() - t0);
t0 = seconds();
double start_time = omp_get_wtime();
// omp_set_num_threads(X.ndims);
// #pragma omp parallel
{
// int threadnum = omp_get_thread_num(), numthreads = omp_get_num_threads();
// #pragma omp for
for (int m = 0; m < X.ndims; ++m){
if(ModeWiseTiledX[m].totNnz > 0){
sort_MI_CSF(X, ModeWiseTiledX, m);
create_TiledHCSR(ModeWiseTiledX, Opt, m);
create_fbrLikeSlcInds(ModeWiseTiledX, m);
make_TiledBin(ModeWiseTiledX, Opt, m);
// cout << "printing " << m << endl;
// print_TiledCOO(ModeWiseTiledX, m);
// print_TiledHCSRtensor(ModeWiseTiledX, m);
// compute_reuse(ModeWiseTiledX, Opt, m);
}
// cout << threadnum << " " << numthreads << endl;
}
}
double omp_time = omp_get_wtime() - start_time;
// printf("Sort,createCSF,createFbrIND - time: %.3f sec, %g \n", seconds() - t0, omp_time);
/* on CPU */
if(Opt.impType == 11){
for (int MTTKRPmode = 0; MTTKRPmode < X.ndims; ++MTTKRPmode){
cout << "MTTKRP on mode " << MTTKRPmode << " using MI-CSF" << endl;
randomize_mats(X, U, Opt);
zero_mat(X, U, MTTKRPmode);
for (int m = 0; m < X.ndims; ++m){
int mode0 = ModeWiseTiledX[m].modeOrder[0];
int mode1 = ModeWiseTiledX[m].modeOrder[1];
int mode2 = ModeWiseTiledX[m].modeOrder[2];
if (mode0 == MTTKRPmode && ModeWiseTiledX[m].totNnz)
MTTKRP_MIHCSR_CPU(ModeWiseTiledX, U, Opt, m);
else if (mode1 == MTTKRPmode && ModeWiseTiledX[m].totNnz ){
// create_fbrLikeSlcInds(ModeWiseTiledX, U, Opt, m, MTTKRPmode);
MTTKRP_MIHCSR_CPU_FBR_ATOMICS(ModeWiseTiledX, U, Opt, m, MTTKRPmode);
}
else if (mode2 == MTTKRPmode && ModeWiseTiledX[m].totNnz )
MTTKRP_MIHCSR_CPU_ALL_ATOMICS(ModeWiseTiledX, U, Opt, m, MTTKRPmode);
}
}
}
/* on GPU */
else if(Opt.impType == 12){
MTTKRP_MIHCSR_GPU(ModeWiseTiledX, U, Opt);
}
// printf("MIHCSR incl CPU - time: %.3f sec \n", seconds() - t0);
}
else // e.g. -1
cout << "no MTTKRP" << endl;
if(!Opt.outFileName.empty()){
write_output(U, Opt.mode, Opt.outFileName);
}
if(Opt.correctness){
if (Opt.impType == 1) {
cout << "Already running COO seq on CPU!" << endl;
exit(0);
}
if(Opt.verbose && (Opt.impType == 12 || Opt.impType == 14))
cout << "checking only the last mode. " << endl;
// Opt.mode = 0;//X.modeOrder[2];
// int MTTKRPmode = 2;
Opt.mode = ((Opt.impType == 12 || Opt.impType == 14 ) ? X.ndims-1 : Opt.mode);
int mode = Opt.mode;
int nr = U[mode].nRows;
int nc = U[mode].nCols;
DTYPE *out = (DTYPE*)malloc(nr * nc * sizeof(DTYPE));
memcpy(out, U[mode].vals, nr*nc * sizeof(DTYPE));
print_matrix(U, mode);
randomize_mats(X, U, Opt);
zero_mat(X, U, mode);
cout << "correctness with COO on mode " << mode <<". "<< endl;
((X.ndims == 3) ? MTTKRP_COO_CPU(X, U, Opt) : MTTKRP_COO_CPU_4D(X, U, Opt));
// MTTKRP_HCSR_CPU_slc(X, TiledX, U, Opt);
print_matrix(U, mode);
correctness_check(out, U[mode].vals, nr, nc);
}
}
| b69f866b16b6dd66a4699cd886984e75566e4c0b.cu | #include <fstream>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include <utility>
#include <math.h>
#include <omp.h>
#include <cuda.h>
// #include "mttkrp_mpi.h"
#include "mttkrp_cpu.h"
#include "mttkrp_gpu.h"
#include "cpd_cpu.h"
#include <bits/stdc++.h>
using namespace std;
int main(int argc, char* argv[]){
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
Options Opt = parse_cmd_options(argc, argv);
Tensor X;
load_tensor(X, Opt);
sort_COOtensor(X);
check_opt(X, Opt); //check options are good
// MPI_param MPIparam;
TiledTensor TiledX[Opt.nTile];
Matrix *U = new Matrix[X.ndims];
create_mats(X, U, Opt, false);
randomize_mats(X, U, Opt);
// if(Opt.impType != 12 && Opt.impType != 14 )
// zero_mat(X, U, Opt.mode); // not sure about the cpu code
if(Opt.verbose)
cout << endl << "Starting MTTKRP..." << endl;
// print tensors and statistics
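    // Opt.impType selects the MTTKRP implementation used below:
    //   0: tensor statistics only        1: COO on CPU          2: HCSR on CPU
    //   3: COO on GPU                    4: HCSR on GPU         5-9: tiled / B-CSF variants
    //   10: HYB (GPU)                    11-12: MI-CSF (CPU / GPU)
    //   13-14: same CSF for all modes (CPU / tiled GPU)         otherwise: no MTTKRP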
if(Opt.impType == 0){
double t0 = seconds();
// print_COOtensor(X);
create_HCSR(X, Opt);
get_nnzPerFiberData(X);
tensor_stats(X);
// ((X.ndims == 3) ? print_HCSRtensor(X) : print_HCSRtensor_4D(X));
}
// COO CPU
if(Opt.impType == 1){
double t0 = seconds();
((X.ndims == 3) ? MTTKRP_COO_CPU(X, U, Opt) : MTTKRP_COO_CPU_4D(X, U, Opt));
printf("COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR CPU
else if(Opt.impType == 2){
create_HCSR(X, Opt);
int *curMode = new int [X.ndims];
for (int m = 0; m < X.ndims; ++m)
curMode[m] = (m + Opt.mode) % X.ndims;
double t0 = seconds();
((X.ndims == 3) ? MTTKRP_HCSR_CPU(X, TiledX, U, Opt) : MTTKRP_HCSR_CPU_4D(X, U, Opt));
printf("gcc no opt : HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
// COO GPU
else if(Opt.impType == 3){
MTTKRP_COO_GPU(X, U, Opt);
}
// HCSR GPU
else if(Opt.impType == 4){
create_HCSR(X, Opt);
MTTKRP_HCSR_GPU(X, U, Opt);
}
// HYB CPU
else if(Opt.impType == 10){
create_HCSR(X, Opt);
HYBTensor HybX(X);
cout << "Creating HYB... " ;
double t0 = seconds();
((X.ndims == 3) ? create_HYB(HybX, X, Opt) : create_HYB_4D(HybX, X, Opt));
printf("create HYB - time: %.3f sec \n", seconds() - t0);
make_HybBin(HybX, Opt);
// print_HYBtensor(HybX);
// ((X.ndims == 3) ? MTTKRP_HYB_CPU(HybX, U, Opt) : MTTKRP_HYB_CPU_4D(HybX, U, Opt));
MTTKRP_HYB_GPU(HybX, U, Opt);
}
/* Tiled versions */
else if(Opt.impType >= 5 && Opt.impType < 10){
create_HCSR(X, Opt);
int tilingMode = X.modeOrder[X.ndims -1];
Opt.tileSize = (X.dims[tilingMode] + Opt.nTile - 1)/Opt.nTile;
if(Opt.nTile > X.dims[tilingMode]){
            cout << "Number of tiles ("<< Opt.nTile << ") must not exceed K's dimension (" << X.dims[tilingMode] << "). Exiting."<< endl ;
exit(0);
}
// split X into tiles based on K indices
make_KTiling(X, TiledX, Opt);
// create HCSR for each tile
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0){
create_TiledHCSR(TiledX, Opt, tile);
}
// print_TiledHCSRtensor(TiledX, tile);
}
        // Split tiles into bins according to nnz in slice
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0)
make_TiledBin(TiledX, Opt, tile);
}
// COO GPU
if(Opt.impType == 5){
double t0 = seconds();
MTTKRP_TILED_COO_CPU(TiledX, U, Opt);
printf("TILED COO CPU - time: %.3f sec \n", seconds() - t0);
}
// HCSR GPU
else if(Opt.impType == 6){
double t0 = seconds();
((X.ndims == 3) ? MTTKRP_TILED_HCSR_CPU(TiledX, U, Opt) : MTTKRP_TILED_HCSR_CPU_4D(TiledX, U, Opt));
printf("TILED HCSR CPU - time: %.3f sec \n", seconds() - t0);
}
// TILED COO GPU
else if(Opt.impType == 7){
cout << "GPU COO has bugs! " << endl;
MTTKRP_TILED_COO_GPU(TiledX, U, Opt);
}
// TILED HCSR GPU
else if(Opt.impType == 8){
cout << "Sorted mode: " << X.modeOrder[0] << " " << X.modeOrder[1] << " " <<X.modeOrder[2] << endl;
create_fbrLikeSlcInds(TiledX, 0);
create_fbrLikeSlcInds(X, Opt);
MTTKRP_B_HCSR_GPU(TiledX, U, Opt);
}
// TILED + support all mode using same B-CSF
else if(Opt.impType == 9){
// int MTTKRPmode = 0;
for (int MTTKRPmode = 0; MTTKRPmode < X.ndims; ++MTTKRPmode){
randomize_mats(X, U, Opt);
zero_mat(X, U, MTTKRPmode);
MTTKRP_B_HCSR_GPU_ANYMODE(TiledX, U, Opt, MTTKRPmode);
}
}
}
/* single-CSF*/
else if(Opt.impType == 13 || Opt.impType == 14 ){
if(Opt.verbose)
cout << "Starting sameCSF: MTTKRP on all modes using same CSF" << endl;
sort_COOtensor(X);
create_HCSR(X, Opt);
// compute_reuse(X,Opt);
// compute_reuse_distance(X,Opt);
/* on CPU non tiled */
if(Opt.impType == 13){
for (int MTTKRPmode = 0; MTTKRPmode < X.ndims; ++MTTKRPmode) {
randomize_mats(X, U, Opt);
zero_mat(X, U, MTTKRPmode);
// if( MTTKRPmode == Opt.mode){
if( X.modeOrder[0] == MTTKRPmode)
((X.ndims == 3) ? MTTKRP_HCSR_CPU(X, TiledX, U, Opt) : MTTKRP_HCSR_CPU_4D(X, U, Opt));
// MTTKRPmode = (Opt.mode + 1) % X.ndims;
else if( X.modeOrder[1] == MTTKRPmode) {
create_fbrLikeSlcInds(X, Opt);
MTTKRP_HCSR_CPU_mode1(X, U, Opt, MTTKRPmode);
}
// // MTTKRPmode = (Opt.mode + 2) % X.ndims;
else if( X.modeOrder[2] == MTTKRPmode)
MTTKRP_HCSR_CPU_mode2(X, U, Opt, MTTKRPmode);
}
}
/* on GPU tiled (skipping on tiled gpu due to time constraints)*/
if(Opt.impType == 14){
int tilingMode = X.modeOrder[X.ndims -1];
Opt.tileSize = (X.dims[tilingMode] + Opt.nTile - 1)/Opt.nTile;
if(Opt.nTile > X.dims[tilingMode]){
                cout << "Number of tiles ("<< Opt.nTile << ") must not exceed K's dimension (" << X.dims[tilingMode] << "). Exiting."<< endl ;
exit(0);
}
// print_HCSRtensor_4D(X);
// split X into tiles based on K indices
make_KTiling(X, TiledX, Opt);
// create HCSR for each tile
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0){
create_TiledHCSR(TiledX, Opt, tile);
create_fbrLikeSlcInds(TiledX, tile);
// print_COOtensor(X);
// print_TiledHCSRtensor(TiledX, tile);
}
}
// Split tiles into bins accordin to nnz in slice
for (int tile = 0; tile < Opt.nTile; ++tile){
if(TiledX[tile].totNnz > 0)
make_TiledBin(TiledX, Opt, tile);
}
MTTKRP_ONE_HCSR_GPU(TiledX, U, Opt);
// MTTKRP_B_HCSR_GPU(TiledX, U, Opt);
}
}
/* MI-CSF*/
else if(Opt.impType == 11 || Opt.impType == 12){
double t0 = seconds();
if(Opt.verbose)
cout << "Starting MI-CSF" << endl;
/*Collect slice and fiber stats: Create CSF for all modes*/
bool slcNfbrStats = true;
Tensor *arrX = new Tensor[X.ndims];
if(slcNfbrStats){
for (int m = 0; m < X.ndims; ++m){
init_tensor(arrX, X, Opt, m);
if(m!= Opt.mode) //already sorted
t0 = seconds();
sort_COOtensor(arrX[m]);
// printf("sort - mode %d - %.3f\n", m, seconds() - t0);
t0 = seconds();
create_HCSR(arrX[m], Opt);
// printf("creat CSF - mode %d - %.3f\n", m, seconds() - t0);
// get_nnzPerFiberData(arrX[m]); //merge with createCSF
// create_hashtable(arrX[m]);
// cout << "created Hshtable" << endl;
// print_HCSRtensor(arrX[m]);
}
}
TiledTensor ModeWiseTiledX[X.ndims];
t0 = seconds();
//mm_partition_allMode(arrX, X, ModeWiseTiledX, Opt);
mm_partition_reuseBased(arrX, X, ModeWiseTiledX, Opt);
populate_paritions(X, ModeWiseTiledX);
// printf("mm_partition & populate - time: %.3f sec \n", seconds() - t0);
t0 = seconds();
double start_time = omp_get_wtime();
// omp_set_num_threads(X.ndims);
// #pragma omp parallel
{
// int threadnum = omp_get_thread_num(), numthreads = omp_get_num_threads();
// #pragma omp for
for (int m = 0; m < X.ndims; ++m){
if(ModeWiseTiledX[m].totNnz > 0){
sort_MI_CSF(X, ModeWiseTiledX, m);
create_TiledHCSR(ModeWiseTiledX, Opt, m);
create_fbrLikeSlcInds(ModeWiseTiledX, m);
make_TiledBin(ModeWiseTiledX, Opt, m);
// cout << "printing " << m << endl;
// print_TiledCOO(ModeWiseTiledX, m);
// print_TiledHCSRtensor(ModeWiseTiledX, m);
// compute_reuse(ModeWiseTiledX, Opt, m);
}
// cout << threadnum << " " << numthreads << endl;
}
}
double omp_time = omp_get_wtime() - start_time;
// printf("Sort,createCSF,createFbrIND - time: %.3f sec, %g \n", seconds() - t0, omp_time);
/* on CPU */
if(Opt.impType == 11){
for (int MTTKRPmode = 0; MTTKRPmode < X.ndims; ++MTTKRPmode){
cout << "MTTKRP on mode " << MTTKRPmode << " using MI-CSF" << endl;
randomize_mats(X, U, Opt);
zero_mat(X, U, MTTKRPmode);
for (int m = 0; m < X.ndims; ++m){
int mode0 = ModeWiseTiledX[m].modeOrder[0];
int mode1 = ModeWiseTiledX[m].modeOrder[1];
int mode2 = ModeWiseTiledX[m].modeOrder[2];
if (mode0 == MTTKRPmode && ModeWiseTiledX[m].totNnz)
MTTKRP_MIHCSR_CPU(ModeWiseTiledX, U, Opt, m);
else if (mode1 == MTTKRPmode && ModeWiseTiledX[m].totNnz ){
// create_fbrLikeSlcInds(ModeWiseTiledX, U, Opt, m, MTTKRPmode);
MTTKRP_MIHCSR_CPU_FBR_ATOMICS(ModeWiseTiledX, U, Opt, m, MTTKRPmode);
}
else if (mode2 == MTTKRPmode && ModeWiseTiledX[m].totNnz )
MTTKRP_MIHCSR_CPU_ALL_ATOMICS(ModeWiseTiledX, U, Opt, m, MTTKRPmode);
}
}
}
/* on GPU */
else if(Opt.impType == 12){
MTTKRP_MIHCSR_GPU(ModeWiseTiledX, U, Opt);
}
// printf("MIHCSR incl CPU - time: %.3f sec \n", seconds() - t0);
}
else // e.g. -1
cout << "no MTTKRP" << endl;
if(!Opt.outFileName.empty()){
write_output(U, Opt.mode, Opt.outFileName);
}
if(Opt.correctness){
if (Opt.impType == 1) {
cout << "Already running COO seq on CPU!" << endl;
exit(0);
}
if(Opt.verbose && (Opt.impType == 12 || Opt.impType == 14))
cout << "checking only the last mode. " << endl;
// Opt.mode = 0;//X.modeOrder[2];
// int MTTKRPmode = 2;
Opt.mode = ((Opt.impType == 12 || Opt.impType == 14 ) ? X.ndims-1 : Opt.mode);
int mode = Opt.mode;
int nr = U[mode].nRows;
int nc = U[mode].nCols;
DTYPE *out = (DTYPE*)malloc(nr * nc * sizeof(DTYPE));
memcpy(out, U[mode].vals, nr*nc * sizeof(DTYPE));
print_matrix(U, mode);
randomize_mats(X, U, Opt);
zero_mat(X, U, mode);
cout << "correctness with COO on mode " << mode <<". "<< endl;
((X.ndims == 3) ? MTTKRP_COO_CPU(X, U, Opt) : MTTKRP_COO_CPU_4D(X, U, Opt));
// MTTKRP_HCSR_CPU_slc(X, TiledX, U, Opt);
print_matrix(U, mode);
correctness_check(out, U[mode].vals, nr, nc);
}
}
|
63f428705aa79f10c3cd993c870688418ca7eb1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
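// One generation of Conway's Game of Life on an n x n grid (border cells are left
// unchanged): 'val' holds the current grid, 'aux' receives the next one; 'estado' is the
// cell's state (0 = dead, 1 = alive) and 'sum' counts the eight live neighbours.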
__global__ void Evolve( int *val, int *aux, int n ) {
int up, upright, right, rightdown, down, downleft, left, leftup;
int sum = 0, estado;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i > 0 && i < (n - 1) && j > 0 && j < (n - 1) ){
estado = val[ i * n + j ];
up = val[ ( i - 1 ) * n + j ];
upright = val[ ( i - 1 ) * n + j + 1 ];
right = val[ i * n + j + 1 ];
rightdown = val[ ( i + 1 ) * n + j + 1 ];
down = val[ ( i + 1 ) * n + j ];
downleft = val[ ( i + 1 ) * n + j - 1 ];
left = val[ i * n + j - 1 ];
leftup = val[ ( i - 1 ) * n + j - 1 ];
sum = up + upright + right + rightdown + down + downleft + left + leftup;
if( sum == 3 ) {
estado = 1;
}
else if( ( estado == 1 ) && ( ( sum < 2 ) || ( sum > 3 ) ) ) {
estado = 0;
}
aux[ i * n + j ] = estado;
}
} | 63f428705aa79f10c3cd993c870688418ca7eb1e.cu | #include "includes.h"
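// One generation of Conway's Game of Life on an n x n grid (border cells are left
// unchanged): 'val' holds the current grid, 'aux' receives the next one; 'estado' is the
// cell's state (0 = dead, 1 = alive) and 'sum' counts the eight live neighbours.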
__global__ void Evolve( int *val, int *aux, int n ) {
int up, upright, right, rightdown, down, downleft, left, leftup;
int sum = 0, estado;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i > 0 && i < (n - 1) && j > 0 && j < (n - 1) ){
estado = val[ i * n + j ];
up = val[ ( i - 1 ) * n + j ];
upright = val[ ( i - 1 ) * n + j + 1 ];
right = val[ i * n + j + 1 ];
rightdown = val[ ( i + 1 ) * n + j + 1 ];
down = val[ ( i + 1 ) * n + j ];
downleft = val[ ( i + 1 ) * n + j - 1 ];
left = val[ i * n + j - 1 ];
leftup = val[ ( i - 1 ) * n + j - 1 ];
sum = up + upright + right + rightdown + down + downleft + left + leftup;
if( sum == 3 ) {
estado = 1;
}
else if( ( estado == 1 ) && ( ( sum < 2 ) || ( sum > 3 ) ) ) {
estado = 0;
}
aux[ i * n + j ] = estado;
}
} |
2f059ad176558a56e25f056af05a52b942fb95b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/adagrad_op.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/selected_rows_functor.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows,
T* grad_merge, const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
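// SparseAdagradFunctorKernel applies, for every row present in the sparse gradient,
//   param[row] -= lr * grad[row] / (sqrt(moment[row]) + epsilon)
// using one thread block per gradient row (blockIdx.y); atomic adds are needed because
// row indices in a SelectedRows gradient may repeat.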
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows,
const T* learning_rate, T* param,
T* moment, int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
} // namespace
template <typename T>
struct SparseAdagradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& grad,
const framework::Tensor& learning_rate, T epsilon,
framework::Tensor* moment, framework::Tensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
math::scatter::Mul<platform::CUDADeviceContext, T> sqare_func;
auto grad_square = sqare_func(context, grad_merge, grad_merge);
math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
hipLaunchKernelGGL(( SparseAdagradFunctorKernel<
T, 256>), dim3(grid2), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(), grad_merge_data, merge_rows.cuda_data(), lr,
param_data, moment_data, grad_width,
epsilon);
}
};
template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>;
template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
| 2f059ad176558a56e25f056af05a52b942fb95b2.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define EIGEN_USE_GPU
#include "paddle/operators/adagrad_op.h"
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/selected_rows_functor.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace {
template <typename T, int block_size>
__global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows,
T* grad_merge, const int64_t* grad_merge_rows,
size_t grad_merge_rows_size,
int64_t row_numel) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
__shared__ size_t grad_merge_idx;
if (tid == 0) {
for (size_t i = 0; i < grad_merge_rows_size; i++) {
if (grad_rows[ty] == grad_merge_rows[i]) {
grad_merge_idx = i;
}
}
}
__syncthreads();
grad += ty * row_numel;
grad_merge += grad_merge_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]);
}
}
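// SparseAdagradFunctorKernel applies, for every row present in the sparse gradient,
//   param[row] -= lr * grad[row] / (sqrt(moment[row]) + epsilon)
// using one thread block per gradient row (blockIdx.y); atomic adds are needed because
// row indices in a SelectedRows gradient may repeat.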
template <typename T, int block_size>
__global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows,
const T* learning_rate, T* param,
T* moment, int64_t row_numel,
T epsilon) {
const int ty = blockIdx.y;
int tid = threadIdx.x;
grad += ty * row_numel;
param += rows[ty] * row_numel;
moment += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
paddle::platform::CudaAtomicAdd(param + index,
-1.0 * learning_rate[0] * grad[index] /
(sqrt(moment[index]) + epsilon));
}
}
} // namespace
template <typename T>
struct SparseAdagradFunctor<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::SelectedRows& grad,
const framework::Tensor& learning_rate, T epsilon,
framework::Tensor* moment, framework::Tensor* param) {
// 1. g_m.rows = set(g.rows)
auto grad_width = grad.value().dims()[1];
math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func;
auto grad_merge = merge_func(context, grad);
auto* grad_merge_data = grad_merge.mutable_value()->template data<T>();
framework::Vector<int64_t> merge_rows(grad_merge.rows());
// 2. m += g_m * g_m
math::scatter::Mul<platform::CUDADeviceContext, T> sqare_func;
auto grad_square = sqare_func(context, grad_merge, grad_merge);
math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor;
functor(context, grad_square, moment);
// 3. update parameter
auto* lr = learning_rate.data<T>();
auto* param_data = param->data<T>();
auto* moment_data = moment->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid2(1, merge_rows.size());
SparseAdagradFunctorKernel<
T, 256><<<grid2, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(grad_merge_data, merge_rows.cuda_data(), lr,
param_data, moment_data, grad_width,
epsilon);
}
};
template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>;
template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>;
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
a6be5edf5c6037f6eb02403d4bf8c1ec224cac1f.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/track_utils.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// graph structure
#include "../matrix.h"
//Generic tools handling fill
#include "../graph_tools.h"
#include "bfs.h"
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
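// CPU reference BFS that honours the per-edge mask 'col_mask': starting from src_node it
// records each visited vertex's parent in 'parents' (-1 = unvisited) and stops early once
// dst_node is found. It is only used by the #if 0 fallback path inside bfs() below.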
void ref_bfs_mask(const int src_node, const int dst_node, const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, const int *col_mask, int *parents)
{
int *q = (int*)malloc(num_nodes * sizeof(int));
q[0] = src_node;
parents[src_node] = src_node;
int idx = -1;
int size = 1;
int found = 0;
while (idx+1 < size && !found) {
idx++;
int u = q[idx];
for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
int v = col_indices[i];
if (parents[v] == -1 && col_mask[i]) {
parents[v] = u;
if (v == dst_node) {
found = 1;
break;
}
else {
q[size] = v;
size++;
}
}
}
}
}
int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges, int src_node, int dst_node, int *q, int *output, int output_type, int *col_mask)
{
fill(num_nodes, output, -1);
hipDeviceSynchronize();
bool mark_pred = (output_type == BFS_MARK_PREDECESSOR);
#if 0
// TODO: use Gunrock's customized BFS here
ref_bfs_mask(src_node, dst_node, num_nodes, num_edges, row_offsets, col_indices, col_mask, parents);
return hipSuccess;
#else
typedef int VertexId;
typedef int SizeT;
typedef int Value;
typedef BFSProblem <VertexId,SizeT,Value,
false, // MARK_PREDECESSORS
true> // IDEMPOTENCE
Problem;
typedef BFSEnactor <Problem> Enactor;
hipError_t retval = hipSuccess;
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
info->InitBase2("BFS");
ContextPtr *context = (ContextPtr*)info->context;
hipStream_t *streams = (hipStream_t*)info->streams;
int *gpu_idx = new int[1];
gpu_idx[0] = 0;
Problem *problem = new Problem(false, false); //no direction optimized, no undirected
if (retval = util::GRError(problem->Init(
        false, //stream_from_host (deprecated)
row_offsets,
col_indices,
col_mask,
output,
num_nodes,
num_edges,
1,
NULL,
"random",
streams),
"BFS Problem Init failed", __FILE__, __LINE__)) return retval;
Enactor *enactor = new Enactor(1, gpu_idx);
if (retval = util::GRError(enactor->Init(context, problem),
"BFS Enactor Init failed.", __FILE__, __LINE__)) return retval;
if (retval = util::GRError(problem->Reset(
src_node, enactor->GetFrontierType()),
"BFS Problem Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Reset(),
"BFS Enactor Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Enact(src_node),
"BFS Enact failed", __FILE__, __LINE__)) return retval;
if (retval = util::GRError(problem->Extract(output, NULL),
"BFS Extract failed", __FILE__, __LINE__)) return retval;
// free memory
delete info;
delete problem;
delete enactor;
//check if path exists
//MAX_INT default value for src dis TODO
return (dst_node >= 0 && dst_node < num_nodes) && (output[dst_node] != -1);
#endif
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| a6be5edf5c6037f6eb02403d4bf8c1ec224cac1f.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/track_utils.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// graph structure
#include "../matrix.h"
//Generic tools handling fill
#include "../graph_tools.h"
#include "bfs.h"
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
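// CPU reference BFS that honours the per-edge mask 'col_mask': starting from src_node it
// records each visited vertex's parent in 'parents' (-1 = unvisited) and stops early once
// dst_node is found. It is only used by the #if 0 fallback path inside bfs() below.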
void ref_bfs_mask(const int src_node, const int dst_node, const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, const int *col_mask, int *parents)
{
int *q = (int*)malloc(num_nodes * sizeof(int));
q[0] = src_node;
parents[src_node] = src_node;
int idx = -1;
int size = 1;
int found = 0;
while (idx+1 < size && !found) {
idx++;
int u = q[idx];
for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
int v = col_indices[i];
if (parents[v] == -1 && col_mask[i]) {
parents[v] = u;
if (v == dst_node) {
found = 1;
break;
}
else {
q[size] = v;
size++;
}
}
}
}
}
int bfs(int *row_offsets, int *col_indices, int num_nodes, int num_edges, int src_node, int dst_node, int *q, int *output, int output_type, int *col_mask)
{
fill(num_nodes, output, -1);
cudaDeviceSynchronize();
bool mark_pred = (output_type == BFS_MARK_PREDECESSOR);
#if 0
// TODO: use Gunrock's customized BFS here
ref_bfs_mask(src_node, dst_node, num_nodes, num_edges, row_offsets, col_indices, col_mask, parents);
return cudaSuccess;
#else
typedef int VertexId;
typedef int SizeT;
typedef int Value;
typedef BFSProblem <VertexId,SizeT,Value,
false, // MARK_PREDECESSORS
true> // IDEMPOTENCE
Problem;
typedef BFSEnactor <Problem> Enactor;
cudaError_t retval = cudaSuccess;
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
info->InitBase2("BFS");
ContextPtr *context = (ContextPtr*)info->context;
cudaStream_t *streams = (cudaStream_t*)info->streams;
int *gpu_idx = new int[1];
gpu_idx[0] = 0;
Problem *problem = new Problem(false, false); //no direction optimized, no undirected
if (retval = util::GRError(problem->Init(
        false, //stream_from_host (deprecated)
row_offsets,
col_indices,
col_mask,
output,
num_nodes,
num_edges,
1,
NULL,
"random",
streams),
"BFS Problem Init failed", __FILE__, __LINE__)) return retval;
Enactor *enactor = new Enactor(1, gpu_idx);
if (retval = util::GRError(enactor->Init(context, problem),
"BFS Enactor Init failed.", __FILE__, __LINE__)) return retval;
if (retval = util::GRError(problem->Reset(
src_node, enactor->GetFrontierType()),
"BFS Problem Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Reset(),
"BFS Enactor Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Enact(src_node),
"BFS Enact failed", __FILE__, __LINE__)) return retval;
if (retval = util::GRError(problem->Extract(output, NULL),
"BFS Extract failed", __FILE__, __LINE__)) return retval;
// free memory
delete info;
delete problem;
delete enactor;
//check if path exists
//MAX_INT default value for src dis TODO
return (dst_node >= 0 && dst_node < num_nodes) && (output[dst_node] != -1);
#endif
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
f7b0593574eeefa3aa904ff643a22a6f7b28d9e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector-Matrix multiplication: Y = A * X.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "vec_mat_mult.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel_using_global_memory(float *Ad, float *Xd, float *Yd)
{
//Multiply A nd X
/* thread id */
int tx = blockIdx.x * blockDim.x + threadIdx.x;
/* variable for partial product */
float prod = 0;
/* calculation loop - row of A X col of X */
for ( int i = 0; i < MATRIX_SIZE; ++i ) {
float A_element = Ad[ MATRIX_SIZE*tx + i ];
float X_element = Xd[ i ];
prod += A_element * X_element;
}
/* store result */
Yd[ tx ] = prod;
}
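/* Shared-memory variant: assumes a 16x16 thread block and MATRIX_SIZE divisible by 16.
   Each block stages 16x16 tiles of A and 16-element slices of X in shared memory, and
   only the threads with threadIdx.x == 0 accumulate the dot product and write the result. */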
__global__ void MatrixMulKernel_using_shared_memory(float *Ad, float *Xd, float *Yd)
{
//Multiply A nd X
/* declare shared memory */
__shared__ float shared_X[ 16 ];
__shared__ float shared_A[ 16 ][ 16 ];
/* thread id */
int row_num = blockIdx.y * blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
/* variable for partial product */
	float temp = 0.0f; // accumulate in floating point, not int, to avoid truncation
/* calculation loop -> uses memory coalescing */
for ( int i = 0; i < MATRIX_SIZE; i = i + 16) {
/* transfers from global to shared memory -> coalesced */
shared_A[ ty ][ tx ] = Ad[ MATRIX_SIZE * row_num + tx + i ];
shared_X[ tx ] = Xd[ tx + i ];
__syncthreads();
/* only first thread in row does actual calculation */
if ( threadIdx.x == 0 ) {
for ( int k = 0; k < blockDim.x; k++ ) {
temp += shared_A[ tx ][ k ] * shared_X[k];
}
}
__syncthreads();
}
/* only have first thread in row report */
if ( threadIdx.x == 0 ){
Yd[ row_num ] = temp;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| f7b0593574eeefa3aa904ff643a22a6f7b28d9e2.cu | /* Vector-Matrix multiplication: Y = A * X.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "vec_mat_mult.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel_using_global_memory(float *Ad, float *Xd, float *Yd)
{
//Multiply A nd X
/* thread id */
int tx = blockIdx.x * blockDim.x + threadIdx.x;
/* variable for partial product */
float prod = 0;
/* calculation loop - row of A X col of X */
for ( int i = 0; i < MATRIX_SIZE; ++i ) {
float A_element = Ad[ MATRIX_SIZE*tx + i ];
float X_element = Xd[ i ];
prod += A_element * X_element;
}
/* store result */
Yd[ tx ] = prod;
}
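/* Shared-memory variant: assumes a 16x16 thread block and MATRIX_SIZE divisible by 16.
   Each block stages 16x16 tiles of A and 16-element slices of X in shared memory, and
   only the threads with threadIdx.x == 0 accumulate the dot product and write the result. */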
__global__ void MatrixMulKernel_using_shared_memory(float *Ad, float *Xd, float *Yd)
{
//Multiply A nd X
/* declare shared memory */
__shared__ float shared_X[ 16 ];
__shared__ float shared_A[ 16 ][ 16 ];
/* thread id */
int row_num = blockIdx.y * blockDim.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
/* variable for partial product */
	float temp = 0.0f; // accumulate in floating point, not int, to avoid truncation
/* calculation loop -> uses memory coalescing */
for ( int i = 0; i < MATRIX_SIZE; i = i + 16) {
/* transfers from global to shared memory -> coalesced */
shared_A[ ty ][ tx ] = Ad[ MATRIX_SIZE * row_num + tx + i ];
shared_X[ tx ] = Xd[ tx + i ];
__syncthreads();
/* only first thread in row does actual calculation */
if ( threadIdx.x == 0 ) {
for ( int k = 0; k < blockDim.x; k++ ) {
temp += shared_A[ tx ][ k ] * shared_X[k];
}
}
__syncthreads();
}
/* only have first thread in row report */
if ( threadIdx.x == 0 ){
Yd[ row_num ] = temp;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
5f95ec41f347e9cede9062a0df1210be5c1bc4ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
// Number of iterations in the kernel
#define ITER_MULTIPLIER 4
// Information of stream for simple domain decomposition
struct stream {
hipStream_t strm; // Stream
int len; // Length of the part for this stream
double *A, *B, *C;
};
// Kernel for vector summation
__global__ void vector_add(double *C, const double *A, const double *B,
int N, int iterations){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Do not try to access past the allocated memory
for (int i = idx; i < N; i += stride) {
C[i] = 0;
for (int j = 0; j < ITER_MULTIPLIER * iterations; j++) {
C[i] += A[i] + B[i];
}
}
}
// Routine for stream test
void streamtest(stream *s, int nstreams, int tib,
int iterations)
{
for (int i = 0; i < nstreams; ++i) {
// Add here the copy - kernel execution - copy sequence
// for each stream
int slen = s[i].len;
dim3 grid, threads;
grid.x = (slen + tib - 1) / tib;
threads.x = tib;
//set value on CPU
for(int j = 0; j < s[i].len; ++j) {
s[i].A[j] = 1.0;
s[i].B[j] = 2.0;
}
hipLaunchKernelGGL(( vector_add), dim3(grid), dim3(threads), 0, s[i].strm, s[i].C, s[i].A, s[i].B, slen, iterations);
}
for (int i = 0; i < nstreams; ++i) {
//check value
double errorsum = 0;
const double ref_value = 3.0 * ITER_MULTIPLIER * iterations;
//TODO Here the CPU accesses C array, make sure it is allowed to.
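        // A stream synchronisation before the host reads C is one way to satisfy the TODO
        // above (assumes the managed buffers were attached to this stream in create_streams).
        CUDA_CHECK( hipStreamSynchronize(s[i].strm) );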
for(int j = 0; j < s[i].len; ++j) {
errorsum += s[i].C[j] - ref_value;
}
printf("Errorsum is %g on stream %d\n", errorsum, i);
}
}
// Create the streams and compute the decomposition
void create_streams(int nstreams, int vecsize, stream **strm)
{
*strm = new stream[nstreams];
stream *s = *strm;
for(int i = 0; i < nstreams; i++) {
CUDA_CHECK( hipStreamCreate(&s[i].strm) );
}
s[0].len = vecsize / nstreams;
s[0].len += vecsize % nstreams ? 1 : 0;
for(int i = 1; i < nstreams; i++) {
int add = vecsize / nstreams;
if(i < vecsize % nstreams) {
add++;
}
s[i].len = add;
}
for(int i = 0; i < nstreams; i++) {
//TODO: Add here allocations for managed memory
//TODO: Attach them to streams to enable independent operation of the various streams
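        // A minimal completion of the two TODOs above (assumes managed-memory support):
        // give each stream its own managed buffers and attach them to that stream so the
        // host and the other streams can keep working independently.
        CUDA_CHECK( hipMallocManaged((void **)&s[i].A, sizeof(double) * s[i].len) );
        CUDA_CHECK( hipMallocManaged((void **)&s[i].B, sizeof(double) * s[i].len) );
        CUDA_CHECK( hipMallocManaged((void **)&s[i].C, sizeof(double) * s[i].len) );
        CUDA_CHECK( hipStreamAttachMemAsync(s[i].strm, s[i].A) );
        CUDA_CHECK( hipStreamAttachMemAsync(s[i].strm, s[i].B) );
        CUDA_CHECK( hipStreamAttachMemAsync(s[i].strm, s[i].C) );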
}
}
// Delete the streams
void destroy_streams(int nstreams, stream *s)
{
for(int i = 0; i < nstreams; i++) {
CUDA_CHECK( hipStreamDestroy(s[i].strm) );
//TODO: Free memory allocations
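        // Free the per-stream managed buffers allocated in create_streams
        CUDA_CHECK( hipFree(s[i].A) );
        CUDA_CHECK( hipFree(s[i].B) );
        CUDA_CHECK( hipFree(s[i].C) );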
}
delete[] s;
}
int main(int argc, char *argv[])
{
const int ThreadsInBlock = 512;
int iterations;
stream *s;
hipDeviceProp_t prop;
if (argc < 2) {
printf("Usage: %s N\nwhere N is the length of the vector.\n",
argv[0]);
exit(EXIT_FAILURE);
}
int N = atoi(argv[1]);
// Determine the number of available multiprocessors on the device.
// It is used for a coarse adjustment of the computation part of
// this test.
hipGetDeviceProperties(&prop, 0);
iterations = (prop.multiProcessorCount + 1) / 2;
// Now do the addition with streams, note that each stream will need to allocate its
// own memory area
int stream_count = 8;
create_streams(stream_count, N, &s);
streamtest(s, stream_count, ThreadsInBlock, iterations);
destroy_streams(stream_count, s);
return 0;
}
| 5f95ec41f347e9cede9062a0df1210be5c1bc4ae.cu | #include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
// Number of iterations in the kernel
#define ITER_MULTIPLIER 4
// Information of stream for simple domain decomposition
struct stream {
cudaStream_t strm; // Stream
int len; // Length of the part for this stream
double *A, *B, *C;
};
// Kernel for vector summation
__global__ void vector_add(double *C, const double *A, const double *B,
int N, int iterations){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Do not try to access past the allocated memory
for (int i = idx; i < N; i += stride) {
C[i] = 0;
for (int j = 0; j < ITER_MULTIPLIER * iterations; j++) {
C[i] += A[i] + B[i];
}
}
}
// Routine for stream test
void streamtest(stream *s, int nstreams, int tib,
int iterations)
{
for (int i = 0; i < nstreams; ++i) {
// Add here the copy - kernel execution - copy sequence
// for each stream
int slen = s[i].len;
dim3 grid, threads;
grid.x = (slen + tib - 1) / tib;
threads.x = tib;
//set value on CPU
for(int j = 0; j < s[i].len; ++j) {
s[i].A[j] = 1.0;
s[i].B[j] = 2.0;
}
vector_add<<<grid, threads, 0, s[i].strm>>>(s[i].C, s[i].A, s[i].B, slen, iterations);
}
for (int i = 0; i < nstreams; ++i) {
//check value
double errorsum = 0;
const double ref_value = 3.0 * ITER_MULTIPLIER * iterations;
//TODO Here the CPU accesses C array, make sure it is allowed to.
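        // A stream synchronisation before the host reads C is one way to satisfy the TODO
        // above (assumes the managed buffers were attached to this stream in create_streams).
        CUDA_CHECK( cudaStreamSynchronize(s[i].strm) );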
for(int j = 0; j < s[i].len; ++j) {
errorsum += s[i].C[j] - ref_value;
}
printf("Errorsum is %g on stream %d\n", errorsum, i);
}
}
// Create the streams and compute the decomposition
void create_streams(int nstreams, int vecsize, stream **strm)
{
*strm = new stream[nstreams];
stream *s = *strm;
for(int i = 0; i < nstreams; i++) {
CUDA_CHECK( cudaStreamCreate(&s[i].strm) );
}
s[0].len = vecsize / nstreams;
s[0].len += vecsize % nstreams ? 1 : 0;
for(int i = 1; i < nstreams; i++) {
int add = vecsize / nstreams;
if(i < vecsize % nstreams) {
add++;
}
s[i].len = add;
}
for(int i = 0; i < nstreams; i++) {
//TODO: Add here allocations for managed memory
//TODO: Attach them to streams to enable independent operation of the various streams
}
}
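// --- Illustrative sketch (added; not called anywhere and not part of the original
// exercise skeleton). One possible way to fill in the TODOs above, mirroring the HIP
// sketch earlier in this document: allocate each stream's A, B and C as managed memory
// and attach the allocations to their stream. cudaMallocManaged and
// cudaStreamAttachMemAsync are standard CUDA calls, but check device support.
static void allocate_stream_buffers_sketch(int nstreams, stream *s)
{
for(int i = 0; i < nstreams; i++) {
size_t bytes = sizeof(double) * s[i].len;
CUDA_CHECK( cudaMallocManaged((void**)&s[i].A, bytes) );
CUDA_CHECK( cudaMallocManaged((void**)&s[i].B, bytes) );
CUDA_CHECK( cudaMallocManaged((void**)&s[i].C, bytes) );
// cudaMemAttachSingle scopes each allocation's coherence to this one stream
CUDA_CHECK( cudaStreamAttachMemAsync(s[i].strm, s[i].A, 0, cudaMemAttachSingle) );
CUDA_CHECK( cudaStreamAttachMemAsync(s[i].strm, s[i].B, 0, cudaMemAttachSingle) );
CUDA_CHECK( cudaStreamAttachMemAsync(s[i].strm, s[i].C, 0, cudaMemAttachSingle) );
}
// The matching cleanup in destroy_streams would cudaFree s[i].A, s[i].B and s[i].C.
}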
// Delete the streams
void destroy_streams(int nstreams, stream *s)
{
for(int i = 0; i < nstreams; i++) {
CUDA_CHECK( cudaStreamDestroy(s[i].strm) );
//TODO: Free memory allocations
}
delete[] s;
}
int main(int argc, char *argv[])
{
const int ThreadsInBlock = 512;
int iterations;
stream *s;
cudaDeviceProp prop;
if (argc < 2) {
printf("Usage: %s N\nwhere N is the length of the vector.\n",
argv[0]);
exit(EXIT_FAILURE);
}
int N = atoi(argv[1]);
// Determine the number of available multiprocessors on the device.
// It is used for a coarse adjustment of the computation part of
// this test.
cudaGetDeviceProperties(&prop, 0);
iterations = (prop.multiProcessorCount + 1) / 2;
// Now do the addition with streams, note that each stream will need to allocate its
// own memory area
int stream_count = 8;
create_streams(stream_count, N, &s);
streamtest(s, stream_count, ThreadsInBlock, iterations);
destroy_streams(stream_count, s);
return 0;
}
|
68b41002717d5fa6c9d93467f7fc5c6c93058cf1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
block_dim: <M / b, N / b>
thread_dim: <t, t>
thread function
// Each thread block computes a b x b area
// Double-buffer idea: the first dimension of sA (and sB) is 2; this exploits the GPU's ability to pipeline by prefetching the data for the next loop iteration in advance
__global__ void SGEMM(float *A, float *B, float *C, int b, int s) {
__shared__ float sA[2][b][s], sB[2][s][b]; // shared by a thread block
float rC[bt][bt] = {0}; // thread local buffer, in the registers
Cooperative fetch first strip from A, B to sA[0], sB[0]
__syncthreads();
for (k = 0; k < K / s; k += 1) {
Cooperative fetch next strip from A, B to sA[(k + 1) % 2], sB[(k + 1) % 2] // runs in parallel with the matrix computation below
__syncthreads();
for (kk = 0; kk < s; kk += 1) {
for (j = 0; j < bt; j += 1) { // unroll loop
for (i = 0; i < bt; i += 1) { // unroll loop
rC[j][i] += sA[k % 2][threadIdx.x * bt + j][kk] * sB[k % 2][kk][threadIdx.y * bt + i];
}
}
}
}
Write rC back to C
} | 68b41002717d5fa6c9d93467f7fc5c6c93058cf1.cu | block_dim: <M / b, N / b>
thread_dim: <t, t>
thread function
// Each thread block computes a b x b area
// Double-buffer idea: the first dimension of sA (and sB) is 2; this exploits the GPU's ability to pipeline by prefetching the data for the next loop iteration in advance
__global__ void SGEMM(float *A, float *B, float *C, int b, int s) {
__shared__ float sA[2][b][s], sB[2][s][b]; // shared by a thread block
float rC[bt][bt] = {0}; // thread local buffer, in the registers
Cooperative fetch first strip from A, B to sA[0], sB[0]
__syncthreads();
for (k = 0; k < K / s; k += 1) {
Cooperative fetch next strip from A, B to sA[(k + 1) % 2], sB[(k + 1) % 2] // runs in parallel with the matrix computation below
__syncthreads();
for (kk = 0; kk < s; kk += 1) {
for (j = 0; j < bt; j += 1) { // unroll loop
for (i = 0; i < bt; i += 1) { // unroll loop
rC[j][i] += sA[k % 2][threadIdx.x * bt + j][kk] * sB[k % 2][kk][threadIdx.y * bt + i];
}
}
}
}
Write rC back to C
} |
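// --- Illustrative sketch (added; not part of either file above) ---
// A concrete CUDA version of the double-buffered, register-tiled SGEMM sketched in the
// pseudocode: T = 16 threads per block dimension, BT = 2 outputs per thread, so the
// output tile is B = T*BT = 32 and the strip width is S = 32. Row-major A (MxK),
// B (KxN), C (MxN); M, N, K are assumed to be multiples of 32. The barrier is placed
// after the compute phase so the prefetch of the next strip overlaps with the current
// computation without overwriting a buffer that other threads are still reading.
#define SG_T 16
#define SG_BT 2
#define SG_B (SG_T * SG_BT)
#define SG_S 32
__global__ void sgemm_double_buffered(const float *A, const float *B, float *C,
int M, int N, int K)
{
__shared__ float sA[2][SG_B][SG_S]; // double-buffered strip of A
__shared__ float sB[2][SG_S][SG_B]; // double-buffered strip of B
float rC[SG_BT][SG_BT] = {{0.0f}}; // per-thread register tile
const int blockRow = blockIdx.x * SG_B;
const int blockCol = blockIdx.y * SG_B;
const int tid = threadIdx.y * SG_T + threadIdx.x; // 0 .. SG_T*SG_T-1
const int nStrips = K / SG_S;
// Cooperative fetch of the first strip into buffer 0
for (int e = tid; e < SG_B * SG_S; e += SG_T * SG_T) {
int r = e / SG_S, c = e % SG_S;
sA[0][r][c] = A[(blockRow + r) * K + c];
sB[0][c][r] = B[c * N + (blockCol + r)];
}
__syncthreads();
for (int k = 0; k < nStrips; k++) {
// Cooperative fetch of the next strip into the other buffer (overlaps with compute)
if (k + 1 < nStrips) {
int kBase = (k + 1) * SG_S;
for (int e = tid; e < SG_B * SG_S; e += SG_T * SG_T) {
int r = e / SG_S, c = e % SG_S;
sA[(k + 1) % 2][r][c] = A[(blockRow + r) * K + (kBase + c)];
sB[(k + 1) % 2][c][r] = B[(kBase + c) * N + (blockCol + r)];
}
}
// Multiply-accumulate on the current buffer
for (int kk = 0; kk < SG_S; kk++)
for (int j = 0; j < SG_BT; j++)
for (int i = 0; i < SG_BT; i++)
rC[j][i] += sA[k % 2][threadIdx.x * SG_BT + j][kk] * sB[k % 2][kk][threadIdx.y * SG_BT + i];
__syncthreads(); // the buffer prefetched above is read in the next iteration
}
// Write the register tile back to C
for (int j = 0; j < SG_BT; j++)
for (int i = 0; i < SG_BT; i++)
C[(blockRow + threadIdx.x * SG_BT + j) * N + (blockCol + threadIdx.y * SG_BT + i)] = rC[j][i];
}
// Launch (assuming dA, dB, dC are device pointers): dim3 grid(M / SG_B, N / SG_B), block(SG_T, SG_T);
// sgemm_double_buffered<<<grid, block>>>(dA, dB, dC, M, N, K);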
7ad378d776c9cd630a2e3caa57494cc3093ab4d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "flamegpu/runtime/messaging/MessageArray3D.h"
#include "flamegpu/model/AgentDescription.h" // Used by Move-Assign
#include "flamegpu/gpu/CUDAMessage.h"
#include "flamegpu/gpu/CUDAScatter.cuh"
#include "flamegpu/runtime/messaging/MessageArray3D/MessageArray3DHost.h"
// #include "flamegpu/runtime/messaging/MessageArray3D/MessageArray3DDevice.cuh"
namespace flamegpu {
/**
* Constructor
* Allocates memory on device for message list length
* @param a Parent CUDAMessage, used to access message settings, data ptrs etc
*/
MessageArray3D::CUDAModelHandler::CUDAModelHandler(CUDAMessage &a)
: MessageSpecialisationHandler()
, d_metadata(nullptr)
, sim_message(a)
, d_write_flag(nullptr)
, d_write_flag_len(0) {
const Data& d = static_cast<const Data &>(a.getMessageDescription());
memcpy(&hd_metadata.dimensions, d.dimensions.data(), d.dimensions.size() * sizeof(unsigned int));
hd_metadata.length = d.dimensions[0] * d.dimensions[1] * d.dimensions[2];
}
void MessageArray3D::CUDAModelHandler::init(CUDAScatter &scatter, const unsigned int &streamId) {
allocateMetaDataDevicePtr();
// Allocate messages
this->sim_message.resize(hd_metadata.length, scatter, streamId);
this->sim_message.setMessageCount(hd_metadata.length);
// Zero the output arrays
auto &read_list = this->sim_message.getReadList();
auto &write_list = this->sim_message.getWriteList();
for (auto &var : this->sim_message.getMessageDescription().variables) {
// Elements is harmless, futureproof for arrays support
// hd_metadata.length is used, as message array can be longer than message count
gpuErrchk(hipMemset(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
gpuErrchk(hipMemset(read_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
}
}
void MessageArray3D::CUDAModelHandler::allocateMetaDataDevicePtr() {
if (d_metadata == nullptr) {
gpuErrchk(hipMalloc(&d_metadata, sizeof(MetaData)));
gpuErrchk(hipMemcpy(d_metadata, &hd_metadata, sizeof(MetaData), hipMemcpyHostToDevice));
}
}
void MessageArray3D::CUDAModelHandler::freeMetaDataDevicePtr() {
if (d_metadata != nullptr) {
gpuErrchk(hipFree(d_metadata));
}
d_metadata = nullptr;
if (d_write_flag) {
gpuErrchk(hipFree(d_write_flag));
}
d_write_flag = nullptr;
d_write_flag_len = 0;
}
void MessageArray3D::CUDAModelHandler::buildIndex(CUDAScatter &scatter, const unsigned int &streamId, const hipStream_t &stream) {
const unsigned int MESSAGE_COUNT = this->sim_message.getMessageCount();
// Zero the output arrays
auto &read_list = this->sim_message.getReadList();
auto &write_list = this->sim_message.getWriteList();
for (auto &var : this->sim_message.getMessageDescription().variables) {
// Elements is harmless, futureproof for arrays support
// hd_metadata.length is used, as message array can be longer than message count
gpuErrchk(hipMemset(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
}
// Reorder messages
unsigned int *t_d_write_flag = nullptr;
if (MESSAGE_COUNT > hd_metadata.length) {
// Use internal memory for d_write_flag
if (d_write_flag_len < MESSAGE_COUNT) {
// Increase length
if (d_write_flag) {
gpuErrchk(hipFree(d_write_flag));
}
d_write_flag_len = static_cast<unsigned int>(MESSAGE_COUNT * 1.1f);
gpuErrchk(hipMalloc(&d_write_flag, sizeof(unsigned int) * d_write_flag_len));
}
t_d_write_flag = d_write_flag;
}
scatter.arrayMessageReorder(streamId, stream, this->sim_message.getMessageDescription().variables, read_list, write_list, MESSAGE_COUNT, hd_metadata.length, t_d_write_flag);
this->sim_message.swap();
// Reset message count back to full array length
// Array message exposes messages that were not output as 0
if (MESSAGE_COUNT != hd_metadata.length)
this->sim_message.setMessageCount(hd_metadata.length);
// Detect errors
// TODO
}
MessageArray3D::Data::Data(const std::shared_ptr<const ModelData> &model, const std::string &message_name)
: MessageBruteForce::Data(model, message_name)
, dimensions({0, 0, 0}) {
description = std::unique_ptr<MessageArray3D::Description>(new MessageArray3D::Description(model, this));
variables.emplace("___INDEX", Variable(1, size_type()));
}
MessageArray3D::Data::Data(const std::shared_ptr<const ModelData> &model, const Data &other)
: MessageBruteForce::Data(model, other)
, dimensions(other.dimensions) {
description = std::unique_ptr<MessageArray3D::Description>(model ? new MessageArray3D::Description(model, this) : nullptr);
if (dimensions[0] == 0 || dimensions[1] == 0 || dimensions[2] == 0) {
THROW exception::InvalidMessage("All dimensions must be above zero in array3D message '%s'\n", other.name.c_str());
}
}
MessageArray3D::Data *MessageArray3D::Data::clone(const std::shared_ptr<const ModelData> &newParent) {
return new Data(newParent, *this);
}
std::unique_ptr<MessageSpecialisationHandler> MessageArray3D::Data::getSpecialisationHander(CUDAMessage &owner) const {
return std::unique_ptr<MessageSpecialisationHandler>(new CUDAModelHandler(owner));
}
std::type_index MessageArray3D::Data::getType() const { return std::type_index(typeid(MessageArray3D)); }
MessageArray3D::Description::Description(const std::shared_ptr<const ModelData> &_model, Data *const data)
: MessageBruteForce::Description(_model, data) { }
void MessageArray3D::Description::setDimensions(const size_type& len_x, const size_type& len_y, const size_type& len_z) {
setDimensions({ len_x , len_y, len_z});
}
void MessageArray3D::Description::setDimensions(const std::array<size_type, 3> &dims) {
if (dims[0] == 0 || dims[1] == 0 || dims[2] == 0) {
THROW exception::InvalidArgument("All dimensions must be above zero in array3D message.\n");
}
reinterpret_cast<Data *>(message)->dimensions = dims;
}
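// Illustrative usage (not from this file; assumes the FLAMEGPU2 ModelDescription API):
// MessageArray3D::Description &msg = model.newMessage<MessageArray3D>("grid");
// msg.setDimensions(64, 64, 64); // every extent must be non-zero, as checked above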
std::array<MessageArray3D::size_type, 3> MessageArray3D::Description::getDimensions() const {
return reinterpret_cast<Data *>(message)->dimensions;
}
MessageArray2D::size_type MessageArray3D::Description::getDimX() const {
return reinterpret_cast<Data *>(message)->dimensions[0];
}
MessageArray2D::size_type MessageArray3D::Description::getDimY() const {
return reinterpret_cast<Data *>(message)->dimensions[1];
}
MessageArray2D::size_type MessageArray3D::Description::getDimZ() const {
return reinterpret_cast<Data *>(message)->dimensions[2];
}
} // namespace flamegpu
| 7ad378d776c9cd630a2e3caa57494cc3093ab4d3.cu | #include "flamegpu/runtime/messaging/MessageArray3D.h"
#include "flamegpu/model/AgentDescription.h" // Used by Move-Assign
#include "flamegpu/gpu/CUDAMessage.h"
#include "flamegpu/gpu/CUDAScatter.cuh"
#include "flamegpu/runtime/messaging/MessageArray3D/MessageArray3DHost.h"
// #include "flamegpu/runtime/messaging/MessageArray3D/MessageArray3DDevice.cuh"
namespace flamegpu {
/**
* Constructor
* Allocates memory on device for message list length
* @param a Parent CUDAMessage, used to access message settings, data ptrs etc
*/
MessageArray3D::CUDAModelHandler::CUDAModelHandler(CUDAMessage &a)
: MessageSpecialisationHandler()
, d_metadata(nullptr)
, sim_message(a)
, d_write_flag(nullptr)
, d_write_flag_len(0) {
const Data& d = static_cast<const Data &>(a.getMessageDescription());
memcpy(&hd_metadata.dimensions, d.dimensions.data(), d.dimensions.size() * sizeof(unsigned int));
hd_metadata.length = d.dimensions[0] * d.dimensions[1] * d.dimensions[2];
}
void MessageArray3D::CUDAModelHandler::init(CUDAScatter &scatter, const unsigned int &streamId) {
allocateMetaDataDevicePtr();
// Allocate messages
this->sim_message.resize(hd_metadata.length, scatter, streamId);
this->sim_message.setMessageCount(hd_metadata.length);
// Zero the output arrays
auto &read_list = this->sim_message.getReadList();
auto &write_list = this->sim_message.getWriteList();
for (auto &var : this->sim_message.getMessageDescription().variables) {
// Elements is harmless, futureproof for arrays support
// hd_metadata.length is used, as message array can be longer than message count
gpuErrchk(cudaMemset(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
gpuErrchk(cudaMemset(read_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
}
}
void MessageArray3D::CUDAModelHandler::allocateMetaDataDevicePtr() {
if (d_metadata == nullptr) {
gpuErrchk(cudaMalloc(&d_metadata, sizeof(MetaData)));
gpuErrchk(cudaMemcpy(d_metadata, &hd_metadata, sizeof(MetaData), cudaMemcpyHostToDevice));
}
}
void MessageArray3D::CUDAModelHandler::freeMetaDataDevicePtr() {
if (d_metadata != nullptr) {
gpuErrchk(cudaFree(d_metadata));
}
d_metadata = nullptr;
if (d_write_flag) {
gpuErrchk(cudaFree(d_write_flag));
}
d_write_flag = nullptr;
d_write_flag_len = 0;
}
void MessageArray3D::CUDAModelHandler::buildIndex(CUDAScatter &scatter, const unsigned int &streamId, const cudaStream_t &stream) {
const unsigned int MESSAGE_COUNT = this->sim_message.getMessageCount();
// Zero the output arrays
auto &read_list = this->sim_message.getReadList();
auto &write_list = this->sim_message.getWriteList();
for (auto &var : this->sim_message.getMessageDescription().variables) {
// Elements is harmless, futureproof for arrays support
// hd_metadata.length is used, as message array can be longer than message count
gpuErrchk(cudaMemset(write_list.at(var.first), 0, var.second.type_size * var.second.elements * hd_metadata.length));
}
// Reorder messages
unsigned int *t_d_write_flag = nullptr;
if (MESSAGE_COUNT > hd_metadata.length) {
// Use internal memory for d_write_flag
if (d_write_flag_len < MESSAGE_COUNT) {
// Increase length
if (d_write_flag) {
gpuErrchk(cudaFree(d_write_flag));
}
d_write_flag_len = static_cast<unsigned int>(MESSAGE_COUNT * 1.1f);
gpuErrchk(cudaMalloc(&d_write_flag, sizeof(unsigned int) * d_write_flag_len));
}
t_d_write_flag = d_write_flag;
}
scatter.arrayMessageReorder(streamId, stream, this->sim_message.getMessageDescription().variables, read_list, write_list, MESSAGE_COUNT, hd_metadata.length, t_d_write_flag);
this->sim_message.swap();
// Reset message count back to full array length
// Array message exposes messages that were not output as 0
if (MESSAGE_COUNT != hd_metadata.length)
this->sim_message.setMessageCount(hd_metadata.length);
// Detect errors
// TODO
}
MessageArray3D::Data::Data(const std::shared_ptr<const ModelData> &model, const std::string &message_name)
: MessageBruteForce::Data(model, message_name)
, dimensions({0, 0, 0}) {
description = std::unique_ptr<MessageArray3D::Description>(new MessageArray3D::Description(model, this));
variables.emplace("___INDEX", Variable(1, size_type()));
}
MessageArray3D::Data::Data(const std::shared_ptr<const ModelData> &model, const Data &other)
: MessageBruteForce::Data(model, other)
, dimensions(other.dimensions) {
description = std::unique_ptr<MessageArray3D::Description>(model ? new MessageArray3D::Description(model, this) : nullptr);
if (dimensions[0] == 0 || dimensions[1] == 0 || dimensions[2] == 0) {
THROW exception::InvalidMessage("All dimensions must be above zero in array3D message '%s'\n", other.name.c_str());
}
}
MessageArray3D::Data *MessageArray3D::Data::clone(const std::shared_ptr<const ModelData> &newParent) {
return new Data(newParent, *this);
}
std::unique_ptr<MessageSpecialisationHandler> MessageArray3D::Data::getSpecialisationHander(CUDAMessage &owner) const {
return std::unique_ptr<MessageSpecialisationHandler>(new CUDAModelHandler(owner));
}
std::type_index MessageArray3D::Data::getType() const { return std::type_index(typeid(MessageArray3D)); }
MessageArray3D::Description::Description(const std::shared_ptr<const ModelData> &_model, Data *const data)
: MessageBruteForce::Description(_model, data) { }
void MessageArray3D::Description::setDimensions(const size_type& len_x, const size_type& len_y, const size_type& len_z) {
setDimensions({ len_x , len_y, len_z});
}
void MessageArray3D::Description::setDimensions(const std::array<size_type, 3> &dims) {
if (dims[0] == 0 || dims[1] == 0 || dims[2] == 0) {
THROW exception::InvalidArgument("All dimensions must be above zero in array3D message.\n");
}
reinterpret_cast<Data *>(message)->dimensions = dims;
}
std::array<MessageArray3D::size_type, 3> MessageArray3D::Description::getDimensions() const {
return reinterpret_cast<Data *>(message)->dimensions;
}
MessageArray2D::size_type MessageArray3D::Description::getDimX() const {
return reinterpret_cast<Data *>(message)->dimensions[0];
}
MessageArray2D::size_type MessageArray3D::Description::getDimY() const {
return reinterpret_cast<Data *>(message)->dimensions[1];
}
MessageArray2D::size_type MessageArray3D::Description::getDimZ() const {
return reinterpret_cast<Data *>(message)->dimensions[2];
}
} // namespace flamegpu
|
879fffed149229e825dd87595669e37525ee28f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "config.h"
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
texture<float, 1, hipReadModeElementType> tex_mx;
texture<float, 1, hipReadModeElementType> tex_my;
texture<float, 1, hipReadModeElementType> tex_mz;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing between thread blocks,
//but randomize the block order so that tasks sharing the same data are not scheduled as neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
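// Ideal-gas equation of state used below: p = (GAMMA - 1) * (E - 0.5 * rho * |v|^2),
// where E is the total energy per unit volume and rho the density.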
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
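// Speed of sound of an ideal gas: c = sqrt(GAMMA * p / rho).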
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);//mx[i];
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);//mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x =tex1Dfetch(tex_mx,nb);// mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z =tex1Dfetch(tex_mz,nb);// mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>();
hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>();
hipChannelFormatDesc chDesc3 = hipCreateChannelDesc<float>();
tex_mx.filterMode = hipFilterModePoint;
tex_mx.normalized = false;
tex_mx.channelDesc = chDesc1;
tex_my.filterMode = hipFilterModePoint;
tex_my.normalized = false;
tex_my.channelDesc = chDesc2;
tex_mz.filterMode = hipFilterModePoint;
tex_mz.normalized = false;
tex_mz.channelDesc = chDesc3;
hipBindTexture(NULL,&tex_mx,d_mx,&chDesc1,cfd_nAtom*sizeof(float));
hipBindTexture(NULL,&tex_my,d_my,&chDesc2,cfd_nAtom*sizeof(float));
hipBindTexture(NULL,&tex_mz,d_mz,&chDesc3,cfd_nAtom*sizeof(float));
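// Binding mx/my/mz to texture references routes the kernel's gathered, read-only
// neighbor accesses (tex1Dfetch by neighbor index) through the texture cache.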
hipSetDeviceFlags(hipDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
for(int i = 0; i <5; i++)
{
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
for(int i = 0; i <ITERATIONS; i++)
{
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
//check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| 879fffed149229e825dd87595669e37525ee28f9.cu | #include "config.h"
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
texture<float, 1, cudaReadModeElementType> tex_mx;
texture<float, 1, cudaReadModeElementType> tex_my;
texture<float, 1, cudaReadModeElementType> tex_mz;
//#define cfd_SUPER_BLOCKS_PER_SM 5
//const int BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = BLOCK_SIZE * MSIZE;
const int cfd_maxNeighbors = 8;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing between thread blocks,
//but randomize the block order so that tasks sharing the same data are not scheduled as neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
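// Ideal-gas equation of state used below: p = (GAMMA - 1) * (E - 0.5 * rho * |v|^2),
// where E is the total energy per unit volume and rho the density.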
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
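// Speed of sound of an ideal gas: c = sqrt(GAMMA * p / rho).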
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<MSIZE*BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);//mx[i];
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);//mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x =tex1Dfetch(tex_mx,nb);// mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z =tex1Dfetch(tex_mz,nb);// mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, BLOCK_SIZE);
cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
// Copy data to GPU
cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);
cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc chDesc3 = cudaCreateChannelDesc<float>();
tex_mx.filterMode = cudaFilterModePoint;
tex_mx.normalized = false;
tex_mx.channelDesc = chDesc1;
tex_my.filterMode = cudaFilterModePoint;
tex_my.normalized = false;
tex_my.channelDesc = chDesc2;
tex_mz.filterMode = cudaFilterModePoint;
tex_mz.normalized = false;
tex_mz.channelDesc = chDesc3;
cudaBindTexture(NULL,&tex_mx,d_mx,&chDesc1,cfd_nAtom*sizeof(float));
cudaBindTexture(NULL,&tex_my,d_my,&chDesc2,cfd_nAtom*sizeof(float));
cudaBindTexture(NULL,&tex_mz,d_mz,&chDesc3,cfd_nAtom*sizeof(float));
cudaSetDeviceFlags(cudaDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
int cfd_gridSize = (cfd_nAtom-1+BLOCK_SIZE) / BLOCK_SIZE;
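// Untimed warm-up launches: the first few kernel invocations absorb one-time
// startup costs so the measured ITERATIONS loop below reflects steady-state time.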
for(int i = 0; i <5; i++)
{
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
for(int i = 0; i <ITERATIONS; i++)
{
cfd_kernel<<<cfd_gridSize, BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
//check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
|
c772351fdad3238d1e99797c872c707e57e0bd89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <stdlib.h>
using namespace std;
#define N 200 // The xy Dimensions/Size of the Two Input Matrices
#define blockSize 32
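// All matrices are N x N, stored row-major in flat int arrays; element (row, col) lives at index row * N + col.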
/////////////// MatrixMultiply Kernel NAIVE ///////////////
// Assumes N Blocks with 32 Threads Each
__global__ void MatrixMultiplyNaive(int *A, int *B, int *C){
// Need to account for different smx tid's
int Atid = blockIdx.x * blockDim.x + threadIdx.x;
int Btid = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against threads that fall outside the N x N matrix
if (Atid >= N || Btid >= N) return;
// Assuming Square Matrices
float floatingPointSum = 0.00f;
// Do the Multiplication
for (int i = 0; i < N; i++) {
// Sum Each Corresponding Multiplication
floatingPointSum = floatingPointSum + (A[(Atid * N) + i] * B[(i * N) + Btid]);
}
// Put the Result in the Output Array
C[(Atid * N) + Btid] = floatingPointSum;
}
/////////////// MatrixMultiply Kernel SHARED ///////////////
// Assumes N Blocks with 32 Threads Each
__global__ void MatrixMultiplyShared(int *X, int *Y, int *Z){
// Store Elements in Shared Memory Vars
__shared__ int matrixA[N];
__shared__ int matrixB[N];
// Need to account for different smx tid's
int Xtid = blockIdx.x * blockDim.x + threadIdx.x;
int Ytid = blockIdx.y * blockDim.y + threadIdx.y;
// Copy Matrix from Global to Shared Memory
for (int i = 0; i < N; i++) {
matrixA[i] = X[i];
matrixB[i] = Y[i];
}
// Ensure Copy is Complete by Syncing
__syncthreads();
// Assuming Square Matrices
float floatingPointSum = 0.00f;
// Do the Multiplication
for (int i = 0; i < N; i++) {
// Sum Each Corresponding Multiplication, Using Shared Copies of Matrices
floatingPointSum = floatingPointSum + (matrixA[(Xtid * N) + i] * matrixB[(i * N) + Ytid]);
}
// Sync Again
__syncthreads();
// Put the Result in the Output Array
Z[(Xtid * N) + Ytid] = floatingPointSum;
}
/////////////// MatrixAdd Kernel ///////////////
// Assumes N Blocks with 32 Threads Each
__global__ void MatrixAdd(int *C, int *Z, int *Output){
// Need to Account for Different SMX Tid's
int Ctid = blockIdx.x * blockDim.x + threadIdx.x;
int Ztid = blockIdx.y * blockDim.y + threadIdx.y;
// Assuming Square Matrices
float floatingPointSum = 0.00f;
// Do the Addition
int maximumXvalue = N;
int location = maximumXvalue * Ztid + Ctid;
// Put the Result in the Output Array
if (location < N * N) {
Output[location] = C[location] + Z[location];
}
}
/////////////// Main ///////////////
int main(){
int A[N * N], B[N * N], C[N * N], X[N * N], Y[N * N], Z[N * N], Output[N * N]; // Input Data: X, Y; Output Data: Z
int *dev_A, *dev_B, *dev_C, *dev_X, *dev_Y, *dev_Z, *dev_Output; // Device Data Pointers
// Allocate Memory on the Device/GPU
hipMalloc((void**)&dev_A, N * N*sizeof(int));
hipMalloc((void**)&dev_B, N * N*sizeof(int));
hipMalloc((void**)&dev_C, N * N*sizeof(int));
hipMalloc((void**)&dev_X, N * N*sizeof(int));
hipMalloc((void**)&dev_Y, N * N*sizeof(int));
hipMalloc((void**)&dev_Z, N * N*sizeof(int));
hipMalloc((void**)&dev_Output, N * N*sizeof(int));
// Fill Input Arrays that are Size N x N
srand((unsigned)time(NULL));
int arrayLength = N * N;
for(int i = 0; i < arrayLength; i++){
A[i] = rand() % 100;
B[i] = rand() % 100;
C[i] = 0;
X[i] = rand() % 100;
Y[i] = rand() % 100;
Z[i] = 0;
Output[i] = 0;
}
// Create the Streams and Timing Events Used by the Three Sections Below
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Cuda Kernel Launch Configuration
int gridSize = (N+(blockSize-1)) / blockSize;
/////////////// Stream 1 ///////////////
// Copy Data to the Device
hipMemcpyAsync(dev_A,A,N * N*sizeof(int),hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_B,B,N * N*sizeof(int),hipMemcpyHostToDevice, stream1);
// Record Start, Launch the Naive Kernel on Stream 1, Record Stop
hipEventRecord(start, stream1);
hipLaunchKernelGGL(( MatrixMultiplyNaive), dim3(gridSize),dim3(blockSize), 0, stream1, dev_A, dev_B, dev_C);
hipEventRecord(stop, stream1);
// Copy Memory off of the Device
hipMemcpyAsync(C, dev_C, N * N*sizeof(int), hipMemcpyDeviceToHost, stream1);
// Wait for the Stop Event, then Report the Elapsed Time
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
/////////////// Stream 2 ///////////////
// Copy Data to the Device
hipMemcpyAsync(dev_X,X,N * N*sizeof(int),hipMemcpyHostToDevice, stream2);
hipMemcpyAsync(dev_Y,Y,N * N*sizeof(int),hipMemcpyHostToDevice, stream2);
// Record Start, Launch the Shared-Memory Kernel on Stream 2, Record Stop
hipEventRecord(start, stream2);
hipLaunchKernelGGL(( MatrixMultiplyShared), dim3(gridSize),dim3(blockSize), 0, stream2, dev_X, dev_Y, dev_Z);
hipEventRecord(stop, stream2);
// Copy Memory off of the Device
hipMemcpyAsync(Z, dev_Z, N * N*sizeof(int), hipMemcpyDeviceToHost, stream2);
// Wait for the Stop Event, then Report the Elapsed Time
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
/////////////// Stream 3 ///////////////
// Copy Data to the Device
hipMemcpy(dev_C,C,N * N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_Z,Z,N * N*sizeof(int),hipMemcpyHostToDevice);
// Record Start, Launch the MatrixAdd Kernel on Stream 3, Record Stop
hipEventRecord(start, stream3);
hipLaunchKernelGGL(( MatrixAdd), dim3(gridSize),dim3(blockSize), 0, stream3, dev_C, dev_Z, dev_Output);
hipEventRecord(stop, stream3);
// Copy Memory off of the Device
hipMemcpy(Output, dev_Output, N * N*sizeof(int), hipMemcpyDeviceToHost);
// Wait for the Stop Event, then Report the Elapsed Time
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
// Free Memory
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_C);
hipFree(dev_X);
hipFree(dev_Y);
hipFree(dev_Z);
hipFree(dev_Output);
} | c772351fdad3238d1e99797c872c707e57e0bd89.cu | #include <iostream>
#include <math.h>
#include <curand_kernel.h>
#include <time.h>
#include <stdlib.h>
using namespace std;
#define N 200 // The xy Dimensions/Size of the Two Input Matrices
#define blockSize 32
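// All matrices are N x N, stored row-major in flat int arrays; element (row, col) lives at index row * N + col.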
/////////////// MatrixMultiply Kernel NAIVE ///////////////
// Assumes N Blocks with 32 Threads Each
__global__ void MatrixMultiplyNaive(int *A, int *B, int *C){
// Need to account for different smx tid's
int Atid = blockIdx.x * blockDim.x + threadIdx.x;
int Btid = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against threads that fall outside the N x N matrix
if (Atid >= N || Btid >= N) return;
// Assuming Square Matrices
float floatingPointSum = 0.00f;
// Do the Multiplication
for (int i = 0; i < N; i++) {
// Sum Each Corresponding Multiplication
floatingPointSum = floatingPointSum + (A[(Atid * N) + i] * B[(i * N) + Btid]);
}
// Put the Result in the Output Array
C[(Atid * N) + Btid] = floatingPointSum;
}
/////////////// MatrixMultiply Kernel SHARED ///////////////
// Assumes N Blocks with 32 Threads Each
__global__ void MatrixMultiplyShared(int *X, int *Y, int *Z){
// Store Elements in Shared Memory Vars
__shared__ int matrixA[N];
__shared__ int matrixB[N];
// Need to account for different smx tid's
int Xtid = blockIdx.x * blockDim.x + threadIdx.x;
int Ytid = blockIdx.y * blockDim.y + threadIdx.y;
// Copy Matrix from Global to Shared Memory
for (int i = 0; i < N; i++) {
matrixA[i] = X[i];
matrixB[i] = Y[i];
}
// Ensure Copy is Complete by Syncing
__syncthreads();
// Assuming Square Matrices
float floatingPointSum = 0.00f;
// Do the Multiplication
for (int i = 0; i < N; i++) {
// Sum Each Corresponding Multiplication, Using Shared Copies of Matrices
floatingPointSum = floatingPointSum + (matrixA[(Xtid * N) + i] * matrixB[(i * N) + Ytid]);
}
// Sync Again
__syncthreads();
// Put the Result in the Output Array
Z[(Xtid * N) + Ytid] = floatingPointSum;
}
/////////////// MatrixAdd Kernel ///////////////
// Assumes N Blocks with 32 Threads Each
__global__ void MatrixAdd(int *C, int *Z, int *Output){
// Need to Account for Different SMX Tid's
int Ctid = blockIdx.x * blockDim.x + threadIdx.x;
int Ztid = blockIdx.y * blockDim.y + threadIdx.y;
// Assuming Square Matrices
float floatingPointSum = 0.00f;
// Do the Addition
int maximumXvalue = N;
int location = maximumXvalue * Ztid + Ctid;
// Put the Result in the Output Array
if (location < N * N) {
Output[location] = C[location] + Z[location];
}
}
/////////////// Main ///////////////
int main(){
int A[N * N], B[N * N], C[N * N], X[N * N], Y[N * N], Z[N * N], Output[N * N]; // Input Data: X, Y; Output Data: Z
int *dev_A, *dev_B, *dev_C, *dev_X, *dev_Y, *dev_Z, *dev_Output; // Device Data Pointers
// Allocate Memory on the Device/GPU
cudaMalloc((void**)&dev_A, N * N*sizeof(int));
cudaMalloc((void**)&dev_B, N * N*sizeof(int));
cudaMalloc((void**)&dev_C, N * N*sizeof(int));
cudaMalloc((void**)&dev_X, N * N*sizeof(int));
cudaMalloc((void**)&dev_Y, N * N*sizeof(int));
cudaMalloc((void**)&dev_Z, N * N*sizeof(int));
cudaMalloc((void**)&dev_Output, N * N*sizeof(int));
// Fill Input Arrays that are Size N x N
srand((unsigned)time(NULL));
int arrayLength = N * N;
for(int i = 0; i < arrayLength; i++){
A[i] = rand() % 100;
B[i] = rand() % 100;
C[i] = 0;
X[i] = rand() % 100;
Y[i] = rand() % 100;
Z[i] = 0;
Output[i] = 0;
}
// Create the Streams and Timing Events Used by the Three Sections Below
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Cuda Kernel Launch Configuration
int gridSize = (N+(blockSize-1)) / blockSize;
/////////////// Stream 1 ///////////////
// Copy Data to the Device
cudaMemcpyAsync(dev_A,A,N * N*sizeof(int),cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_B,B,N * N*sizeof(int),cudaMemcpyHostToDevice, stream1);
// Record Start, Launch the Naive Kernel on Stream 1, Record Stop
cudaEventRecord(start, stream1);
MatrixMultiplyNaive<<<gridSize,blockSize, 0, stream1>>>(dev_A, dev_B, dev_C);
cudaEventRecord(stop, stream1);
// Copy Memory off of the Device
cudaMemcpyAsync(C, dev_C, N * N*sizeof(int), cudaMemcpyDeviceToHost, stream1);
// Wait for the Stop Event, then Report the Elapsed Time
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
/////////////// Stream 2 ///////////////
// Copy Data to the Device
cudaMemcpyAsync(dev_X,X,N * N*sizeof(int),cudaMemcpyHostToDevice, stream2);
cudaMemcpyAsync(dev_Y,Y,N * N*sizeof(int),cudaMemcpyHostToDevice, stream2);
// Record Start, Launch the Shared-Memory Kernel on Stream 2, Record Stop
cudaEventRecord(start, stream2);
MatrixMultiplyShared<<<gridSize,blockSize, 0, stream2>>>(dev_X, dev_Y, dev_Z);
cudaEventRecord(stop, stream2);
// Copy Memory off of the Device
cudaMemcpyAsync(Z, dev_Z, N * N*sizeof(int), cudaMemcpyDeviceToHost, stream2);
// Wait for the Stop Event, then Report the Elapsed Time
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
/////////////// Stream 3 ///////////////
// Copy Data to the Device
cudaMemcpy(dev_C,C,N * N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_Z,Z,N * N*sizeof(int),cudaMemcpyHostToDevice);
// Record Start, Launch the MatrixAdd Kernel on Stream 3, Record Stop
cudaEventRecord(start, stream3);
MatrixAdd<<<gridSize,blockSize, 0, stream3>>>(dev_C, dev_Z, dev_Output);
cudaEventRecord(stop, stream3);
// Copy Memory off of the Device
cudaMemcpy(Output, dev_Output, N * N*sizeof(int), cudaMemcpyDeviceToHost);
// Wait for the Stop Event, then Report the Elapsed Time
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "Time Elapsed: " << milliseconds << endl;
// Free Memory
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
cudaFree(dev_X);
cudaFree(dev_Y);
cudaFree(dev_Z);
cudaFree(dev_Output);
} |
b08b5d65dae2a20b83aedfd4e9f3761a8d61f9f0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason:%s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n, ", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n");
return;
}
void initialData(float *ip, int size) {
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++) {
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx=0; idx<N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// setup device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// vector size
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
// host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data on host
iStart = cpuSecond();
initialData (h_A, nElem);
initialData (h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// sum arrays on host
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
printf("sumArraysOnHost Time elapsed %f sec\n", iElaps);
// device memory
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
// host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
// host kernel
int iLen = 1024;
dim3 block(iLen);
dim3 grid((nElem + block.x - 1) / block.x);
iStart = cpuSecond();
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, nElem);
CHECK(hipDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("sumArraysOnGPU <<<%d, %d>>> Time elapsed %f " \
"sec\n", grid.x, block.x, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy result on device to host
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check result
checkResult(hostRef, gpuRef, nElem);
// free global memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
} | b08b5d65dae2a20b83aedfd4e9f3761a8d61f9f0.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason:%s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n, ", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n");
return;
}
void initialData(float *ip, int size) {
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++) {
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx=0; idx<N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// setup device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// vector size
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
// host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data on host
iStart = cpuSecond();
initialData (h_A, nElem);
initialData (h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// sum arrays on host
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
printf("sumArraysOnHost Time elapsed %f sec\n", iElaps);
// device memory
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
// host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
// host kernel
int iLen = 1024;
dim3 block(iLen);
dim3 grid((nElem + block.x - 1) / block.x);
iStart = cpuSecond();
sumArraysOnGPU<<< grid, block >>>(d_A, d_B, d_C, nElem);
CHECK(cudaDeviceSynchronize());
iElaps = cpuSecond() - iStart;
printf("sumArraysOnGPU <<<%d, %d>>> Time elapsed %f " \
"sec\n", grid.x, block.x, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy result on device to host
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check result
checkResult(hostRef, gpuRef, nElem);
// free global memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
} |
b44310399bca46b9bb3643901319dded26da073b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "preencheGenes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int n = 1;
unsigned int np = 1;
int *genes = NULL;
hipMalloc(&genes, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
preencheGenes), dim3(gridBlock),dim3(threadBlock), 0, 0, n,np,genes);
hipDeviceSynchronize();
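// Untimed warm-up launches before the measured 1000-iteration loop below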
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
preencheGenes), dim3(gridBlock),dim3(threadBlock), 0, 0, n,np,genes);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
preencheGenes), dim3(gridBlock),dim3(threadBlock), 0, 0, n,np,genes);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b44310399bca46b9bb3643901319dded26da073b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "preencheGenes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int n = 1;
unsigned int np = 1;
int *genes = NULL;
cudaMalloc(&genes, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
preencheGenes<<<gridBlock,threadBlock>>>(n,np,genes);
cudaDeviceSynchronize();
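// Untimed warm-up launches before the measured 1000-iteration loop below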
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
preencheGenes<<<gridBlock,threadBlock>>>(n,np,genes);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
preencheGenes<<<gridBlock,threadBlock>>>(n,np,genes);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
413b0c28fede4be11068a9540da80f16b8249521.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Vinh Dang
* [email protected]
*
* udfa_gpu.cu
*/
#include "common.h"
#include "udfa_gpu.h"
__global__ void udfa_kernel(
state_t *input_dfa_state_tables,
symboln *input,
unsigned int *pkt_size_vec, unsigned int pkt_size,
unsigned int *match_count, match_type *match_array, unsigned int match_vec_size,
unsigned int *accum_dfa_state_table_lengths, unsigned int n_subsets){
unsigned int dfa_id = threadIdx.x + blockIdx.y * blockDim.x;
match_type tmp_match;
if (dfa_id >= n_subsets)
return;
unsigned int shr_match_count = 0;
//cur_pkt_size is the input string length of the packet
unsigned int cur_pkt_size = pkt_size_vec[blockIdx.x];
//jump to the right input string
input += (pkt_size * blockIdx.x/fetch_bytes);
unsigned int accum_dfa_state_table_length = accum_dfa_state_table_lengths[dfa_id];
state_t current_state = 0;
//loop over payload
for(unsigned int p=0; p<cur_pkt_size; p+=fetch_bytes, input++){
symboln Input_ = *input;//fetch 4 bytes from the input string
for (unsigned int byt = 0; byt < fetch_bytes; byt++) {
unsigned int Input = Input_ & 0xFF;//extract 1 byte
Input_ = Input_ >> 8;//Input_ right-shifted by 8 bits
//query the state table on the input symbol for the next state
current_state = input_dfa_state_tables [current_state * CSIZE + Input + accum_dfa_state_table_length];
if (current_state < 0) {//Added for matching operation: check if the dst state is an accepting state
current_state = -current_state;
//match_offset[match_vec_size*blockIdx.x + shr_match_count + dfa_id*match_vec_size*nstreams] = p;
//match_states[match_vec_size*blockIdx.x + shr_match_count + dfa_id*match_vec_size*nstreams] = current_state;
tmp_match.off = p + byt;
tmp_match.stat = current_state;
match_array[shr_match_count + match_vec_size*(blockIdx.x + dfa_id*gridDim.x)] = tmp_match;
shr_match_count = shr_match_count + 1;
}
}
}
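// One match counter per (packet, DFA subset) pair; the packet index varies fastest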
match_count[blockIdx.x + dfa_id*gridDim.x] = shr_match_count;
}
| 413b0c28fede4be11068a9540da80f16b8249521.cu | /*
* Vinh Dang
* [email protected]
*
* udfa_gpu.cu
*/
#include "common.h"
#include "udfa_gpu.h"
__global__ void udfa_kernel(
state_t *input_dfa_state_tables,
symboln *input,
unsigned int *pkt_size_vec, unsigned int pkt_size,
unsigned int *match_count, match_type *match_array, unsigned int match_vec_size,
unsigned int *accum_dfa_state_table_lengths, unsigned int n_subsets){
unsigned int dfa_id = threadIdx.x + blockIdx.y * blockDim.x;
match_type tmp_match;
if (dfa_id >= n_subsets)
return;
unsigned int shr_match_count = 0;
//cur_pkt_size is the input string length of the packet
unsigned int cur_pkt_size = pkt_size_vec[blockIdx.x];
//jump to the right input string
input += (pkt_size * blockIdx.x/fetch_bytes);
unsigned int accum_dfa_state_table_length = accum_dfa_state_table_lengths[dfa_id];
state_t current_state = 0;
//loop over payload
for(unsigned int p=0; p<cur_pkt_size; p+=fetch_bytes, input++){
symboln Input_ = *input;//fetch 4 bytes from the input string
for (unsigned int byt = 0; byt < fetch_bytes; byt++) {
unsigned int Input = Input_ & 0xFF;//extract 1 byte
Input_ = Input_ >> 8;//Input_ right-shifted by 8 bits
//query the state table on the input symbol for the next state
current_state = input_dfa_state_tables [current_state * CSIZE + Input + accum_dfa_state_table_length];
if (current_state < 0) {//Added for matching operation: check if the dst state is an accepting state
current_state = -current_state;
//match_offset[match_vec_size*blockIdx.x + shr_match_count + dfa_id*match_vec_size*nstreams] = p;
//match_states[match_vec_size*blockIdx.x + shr_match_count + dfa_id*match_vec_size*nstreams] = current_state;
tmp_match.off = p + byt;
tmp_match.stat = current_state;
match_array[shr_match_count + match_vec_size*(blockIdx.x + dfa_id*gridDim.x)] = tmp_match;
shr_match_count = shr_match_count + 1;
}
}
}
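// One match counter per (packet, DFA subset) pair; the packet index varies fastest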
match_count[blockIdx.x + dfa_id*gridDim.x] = shr_match_count;
}
|
5865b3ff2698dd7fad1aab4a58ba25d932ded391.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
void init_cpu_data(long long int* A, long long int size, long long int stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
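// Device-side initialization: each thread strides through the buffer by the total
// thread count so every element is written once before the timed kernel runs.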
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
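// Each thread loads one element at its strided index, performs clock_count dependent
// additions on it, and stores it back: the load/compute/store pattern timed in main().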
//__global__ void Page_visitor(long long int *A, long long int *B, long long int data_stride, long long int clock_count){
__global__ void Page_visitor(long long int *A, long long int data_stride, long long int clock_count){////load-compute -store
/*
long long int index = threadIdx.x;
/////////////////////////////////time
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
if(index = 0){
start_time= clock64();
}
__syncthreads();
*/
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
long long int value = A[index];
/*
//////////////////////////////////////////////sleep
long long int start_clock = clock64();
long long int clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
*/
//////////////////////////////////////////////loop
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value = value + threadIdx.x;
}
/*
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
if(blockIdx.x == 55){
int nsmid = 1;
asm("mov.u32 %0, %smid;" : "=r"(nsmid) );
printf("nsmid: %d\n", nsmid);
}
}
*/
//d_o[0] = clock_offset;
//////////////////////////////////////////////sleep
A[index] = value;
/*
__syncthreads();
/////////////////////////////////time
if(index = 0){
start_time= clock64();
time_interval = end_time - start_time;//////clock
}
//B[0] = time_interval;
*/
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 7;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
checkCudaErrors(hipSetDevice(dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id));
printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1);
//plain managed
///also add more cases with different strides at the same data size?
///it might be more interesting to see smaller stride since real applications might be so.
///will warpid(in the same sm) and ctaid be assigned again? //////////warpid will be reused, ctaid will not.
//////////////0.5gb to 64gb stride 0.5gb (1 * 4 * 1024)
//for(long long int data_stride = 1 * 1 * 2; data_stride <= 1 * 1 * 256; data_stride = data_stride + 1 * 1 * 2){/////512 is 4m, see what happens after 2m. 128 positions.
for(long long int factor = 1; factor <= 8; factor = factor * 2){
printf("###################\n#########################managed\n");
for(long long int data_stride = 1 * 1 * 8 / factor; data_stride <= 1 * 1 * 512 / factor; data_stride = data_stride + 1 * 1 * 8 / factor){////test
//for(long long int data_stride = 1 * 256 * 1024; data_stride <= 1 * 256 * 1024; data_stride = data_stride + 1 * 8 * 1024){
for(long long int mod = 4294967296; mod <= 4294967296; mod = mod * 2){////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
for(long long int clock_count = 128; clock_count <= 128; clock_count = clock_count * 2){
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * 16384 * factor;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
//init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//long long int *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(long long int) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(long long int) * data_size, hipMemcpyHostToDevice);
/*
///////////////////////////////////////////////////////////////////GPU data out
long long int *GPU_data_out;
//checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(long long int) * data_size));
checkCudaErrors(hipMallocManaged(&GPU_data_out, sizeof(long long int) * data_size));/////////////using unified memory
*/
hipLaunchKernelGGL(( gpu_initialization), dim3(16384 * factor), dim3(512), 0, 0, CPU_data_in, data_stride, data_size);///////////////1024 per block max
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
hipLaunchKernelGGL(( Page_visitor), dim3(16384 * factor), dim3(512), 0, 0, CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 512 * 1 * 256 * 1024 = 32gb, 32 * 512 * 1 * 512 * 1024 = 64gb.
hipDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
checkCudaErrors(hipFree(CPU_data_in));
//checkCudaErrors(hipFree(GPU_data_out));
}
printf("\n");
}
}
}
exit(EXIT_SUCCESS);
} | 5865b3ff2698dd7fad1aab4a58ba25d932ded391.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
#include <sys/time.h>
/////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca"
//////////////large vs small data.
void init_cpu_data(long long int* A, long long int size, long long int stride){
for (long long int i = 0; i < size; i++){
A[i]=1;
}
/*
for (long long int i = 0; i < size - stride; i++){
A[i]=(i + stride);
}
for (long long int i = size - stride; i < size; i++){
A[i]=0;
}
*/
}
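// Device-side initialization: each thread strides through the buffer by the total
// thread count so every element is written once before the timed kernel runs.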
__global__ void gpu_initialization(long long int *A, long long int data_stride, long long int data_size){
long long int index = (blockIdx.x * blockDim.x + threadIdx.x);
long long int thread_num = gridDim.x * blockDim.x;
for(long long int it = 0; it < data_size; it = it + thread_num){
A[index + it]=23;
}
}
long long unsigned time_diff(timespec start, timespec end){
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0){
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
}
else{
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
long long unsigned time_interval_ns = temp.tv_nsec;
long long unsigned time_interval_s = temp.tv_sec;
time_interval_s = time_interval_s * 1000000000;
return time_interval_s + time_interval_ns;
}
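// Each thread loads one element at its strided index, performs clock_count dependent
// additions on it, and stores it back: the load/compute/store pattern timed in main().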
//__global__ void Page_visitor(long long int *A, long long int *B, long long int data_stride, long long int clock_count){
__global__ void Page_visitor(long long int *A, long long int data_stride, long long int clock_count){////load-compute -store
/*
long long int index = threadIdx.x;
/////////////////////////////////time
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
if(index = 0){
start_time= clock64();
}
__syncthreads();
*/
long long int index = (blockIdx.x * blockDim.x + threadIdx.x) * data_stride;
long long int value = A[index];
/*
//////////////////////////////////////////////sleep
long long int start_clock = clock64();
long long int clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
*/
//////////////////////////////////////////////loop
long long int clock_offset = 0;
while (clock_offset < clock_count){/////////////////what's the time overhead for addition and multiplication?
clock_offset++;
value = value + threadIdx.x;
}
/*
if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid
int smid = 1;
asm("mov.u32 %0, %smid;" : "=r"(smid) );
printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid);
if(blockIdx.x == 55){
int nsmid = 1;
asm("mov.u32 %0, %smid;" : "=r"(nsmid) );
printf("nsmid: %d\n", nsmid);
}
}
*/
//d_o[0] = clock_offset;
//////////////////////////////////////////////sleep
A[index] = value;
/*
__syncthreads();
/////////////////////////////////time
if(index = 0){
start_time= clock64();
time_interval = end_time - start_time;//////clock
}
//B[0] = time_interval;
*/
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//long long int dev_id = findCudaDevice(argc, (const char **) argv);
long long int dev_id = 7;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
checkCudaErrors(cudaSetDevice(dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
printf("clock_rate:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
if (device_prop.concurrentManagedAccess == 1){
printf("This device supports concurrent Managed Access.\n");
}else{
printf("This device does not support concurrent Managed Access.\n");
}
int value1 = 1;
checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id));
printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1);
//plain managed
///also add more cases with different strides at the same data size?
///it might be more interesting to see smaller stride since real applications might be so.
///will warpid(in the same sm) and ctaid be assigned again? //////////warpid will be reused, ctaid will not.
//////////////0.5gb to 64gb stride 0.5gb (1 * 4 * 1024)
//for(long long int data_stride = 1 * 1 * 2; data_stride <= 1 * 1 * 256; data_stride = data_stride + 1 * 1 * 2){/////512 is 4m, see what happens after 2m. 128 positions.
for(long long int factor = 1; factor <= 8; factor = factor * 2){
printf("###################\n#########################managed\n");
for(long long int data_stride = 1 * 1 * 8 / factor; data_stride <= 1 * 1 * 512 / factor; data_stride = data_stride + 1 * 1 * 8 / factor){////test
//for(long long int data_stride = 1 * 256 * 1024; data_stride <= 1 * 256 * 1024; data_stride = data_stride + 1 * 8 * 1024){
for(long long int mod = 4294967296; mod <= 4294967296; mod = mod * 2){////134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index)
for(long long int clock_count = 128; clock_count <= 128; clock_count = clock_count * 2){
///////////////////////////////////////////////////////////////////CPU data begin
//long long int data_size = mod;
long long int data_size = data_stride;
data_size = data_size * 16384 * factor;
data_size = data_size * 512;
//long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
long long int *CPU_data_in;
//CPU_data_in = (long long int*)malloc(sizeof(long long int) * data_size);
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(long long int) * data_size));/////////////using unified memory
//init_cpu_data(CPU_data_in, data_size, data_stride);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//long long int *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(long long int) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(long long int) * data_size, cudaMemcpyHostToDevice);
/*
///////////////////////////////////////////////////////////////////GPU data out
long long int *GPU_data_out;
//checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(long long int) * data_size));
checkCudaErrors(cudaMallocManaged(&GPU_data_out, sizeof(long long int) * data_size));/////////////using unified memory
*/
gpu_initialization<<<16384 * factor, 512>>>(CPU_data_in, data_stride, data_size);///////////////1024 per block max
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts1;
clock_gettime(CLOCK_REALTIME, &ts1);
Page_visitor<<<16384 * factor, 512>>>(CPU_data_in, data_stride, clock_count);///////////////1024 per block max
///////////////////////////////////////////////////32 * 512 * 1 * 256 * 1024 = 32gb, 32 * 512 * 1 * 512 * 1024 = 64gb.
cudaDeviceSynchronize();
/////////////////////////////////time
struct timespec ts2;
clock_gettime(CLOCK_REALTIME, &ts2);
//printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count);
//printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2));
printf("%llu ", time_diff(ts1, ts2));
checkCudaErrors(cudaFree(CPU_data_in));
//checkCudaErrors(cudaFree(GPU_data_out));
}
printf("\n");
}
}
}
exit(EXIT_SUCCESS);
} |
34978e63d254a862e19c1440e2a567bfac899a6e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hipfft.h> // CUDA FFT Libraries
#include <helper_cuda.h> // Helper functions for CUDA Error handling
// OpenGL Graphics includes
#include <GLES3/gl31.h>
// FluidsGLES CUDA kernel definitions
#include "fluidsGLES_kernels.cuh"
// Texture object for reading velocity field
hipTextureObject_t texObj;
static hipArray *array = NULL;
// Particle data
extern GLuint vbo; // OpenGL vertex buffer object
extern struct cudaGraphicsResource *cuda_vbo_resource; // handles OpenGL-CUDA exchange
// Texture pitch
extern size_t tPitch;
extern hipfftHandle planr2c;
extern hipfftHandle planc2r;
cData *vxfield = NULL;
cData *vyfield = NULL;
void setupTexture(int x, int y)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float2>();
hipMallocArray(&array, &desc, y, x);
getLastCudaError("hipMalloc failed");
hipResourceDesc texRes;
memset(&texRes,0,sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = array;
hipTextureDesc texDescr;
memset(&texDescr,0,sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeWrap;
texDescr.readMode = hipReadModeElementType;
checkCudaErrors(hipCreateTextureObject(&texObj, &texRes, &texDescr, NULL));
}
void updateTexture(cData *data, size_t wib, size_t h, size_t pitch)
{
checkCudaErrors(hipMemcpy2DToArray(array, 0, 0, data, pitch, wib, h, hipMemcpyDeviceToDevice));
}
void deleteTexture(void)
{
checkCudaErrors(hipDestroyTextureObject(texObj));
checkCudaErrors(hipFreeArray(array));
}
// Note that these kernels are designed to work with arbitrary
// domain sizes, not just domains that are multiples of the tile
// size. Therefore, we have extra code that checks to make sure
// a given thread location falls within the domain boundaries in
// both X and Y. Also, the domain is covered by looping over
// multiple elements in the Y direction, while there is a one-to-one
// mapping between threads in X and the tile size in X.
// Nolan Goodnight 9/22/06
// This method adds constant force vectors to the velocity field
// stored in 'v' according to v(x,t+1) = v(x,t) + dt * f.
__global__ void
addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
cData *fj = (cData *)((char *)v + (ty + spy) * pitch) + tx + spx;
cData vterm = *fj;
tx -= r;
ty -= r;
float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty);
vterm.x += s * fx;
vterm.y += s * fy;
*fj = vterm;
}
// This method performs the velocity advection step, where we
// trace velocity vectors back in time to update each grid cell.
// That is, v(x,t+1) = v(p(x,-dt),t). Here we perform bilinear
// interpolation in the velocity space.
__global__ void
advectVelocity_k(cData *v, float *vx, float *vy,
int dx, int pdx, int dy, float dt, int lb, hipTextureObject_t texObject)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
cData vterm, ploc;
float vxterm, vyterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * pdx + gtidx;
vterm = tex2D<cData>(texObject, (float)gtidx, (float)fi);
ploc.x = (gtidx + 0.5f) - (dt * vterm.x * dx);
ploc.y = (fi + 0.5f) - (dt * vterm.y * dy);
vterm = tex2D<cData>(texObject, ploc.x, ploc.y);
vxterm = vterm.x;
vyterm = vterm.y;
vx[fj] = vxterm;
vy[fj] = vyterm;
}
}
}
}
// This method performs velocity diffusion and forces mass conservation
// in the frequency domain. The inputs 'vx' and 'vy' are complex-valued
// arrays holding the Fourier coefficients of the velocity field in
// X and Y. Diffusion in this space takes a simple form described as:
// v(k,t) = v(k,t) / (1 + visc * dt * k^2), where visc is the viscosity,
// and k is the wavenumber. The projection step forces the Fourier
// velocity vectors to be orthogonal to the vectors for each
// wavenumber: v(k,t) = v(k,t) - ((k dot v(k,t) * k) / k^2.
__global__ void
diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt,
float visc, int lb)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
cData xterm, yterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * dx + gtidx;
xterm = vx[fj];
yterm = vy[fj];
// Compute the index of the wavenumber based on the
// data order produced by a standard NN FFT.
int iix = gtidx;
int iiy = (fi>dy/2)?(fi-(dy)):fi;
// Velocity diffusion
float kk = (float)(iix * iix + iiy * iiy); // k^2
float diff = 1.f / (1.f + visc * dt * kk);
xterm.x *= diff;
xterm.y *= diff;
yterm.x *= diff;
yterm.y *= diff;
// Velocity projection
if (kk > 0.f)
{
float rkk = 1.f / kk;
// Real portion of velocity projection
float rkp = (iix * xterm.x + iiy * yterm.x);
// Imaginary portion of velocity projection
float ikp = (iix * xterm.y + iiy * yterm.y);
xterm.x -= rkk * rkp * iix;
xterm.y -= rkk * ikp * iix;
yterm.x -= rkk * rkp * iiy;
yterm.y -= rkk * ikp * iiy;
}
vx[fj] = xterm;
vy[fj] = yterm;
}
}
}
}
// This method updates the velocity field 'v' using the two complex
// arrays from the previous step: 'vx' and 'vy'. Here we scale the
// real components by 1/(dx*dy) to account for an unnormalized FFT.
__global__ void
updateVelocity_k(cData *v, float *vx, float *vy,
int dx, int pdx, int dy, int lb, size_t pitch)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
float vxterm, vyterm;
cData nvterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fjr = fi * pdx + gtidx;
vxterm = vx[fjr];
vyterm = vy[fjr];
// Normalize the result of the inverse FFT
float scale = 1.f / (dx * dy);
nvterm.x = vxterm * scale;
nvterm.y = vyterm * scale;
cData *fj = (cData *)((char *)v + fi * pitch) + gtidx;
*fj = nvterm;
}
} // If this thread is inside the domain in Y
} // If this thread is inside the domain in X
}
// This method updates the particles by moving particle positions
// according to the velocity field and time step. That is, for each
// particle: p(t+1) = p(t) + dt * v(p(t)).
__global__ void
advectParticles_k(cData *part, cData *v, int dx, int dy,
float dt, int lb, size_t pitch)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
// gtidx is the domain location in x for this thread
cData pterm, vterm;
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * dx + gtidx;
pterm = part[fj];
int xvi = ((int)(pterm.x * dx));
int yvi = ((int)(pterm.y * dy));
vterm = *((cData *)((char *)v + yvi * pitch) + xvi);
pterm.x += dt * vterm.x;
pterm.x = pterm.x - (int)pterm.x;
pterm.x += 1.f;
pterm.x = pterm.x - (int)pterm.x;
pterm.y += dt * vterm.y;
pterm.y = pterm.y - (int)pterm.y;
pterm.y += 1.f;
pterm.y = pterm.y - (int)pterm.y;
part[fj] = pterm;
}
} // If this thread is inside the domain in Y
} // If this thread is inside the domain in X
}
// These are the external function calls necessary for launching fluid simuation
extern "C"
void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r)
{
dim3 tids(2*r+1, 2*r+1);
hipLaunchKernelGGL(( addForces_k), dim3(1), dim3(tids), 0, 0, v, dx, dy, spx, spy, fx, fy, r, tPitch);
getLastCudaError("addForces_k failed.");
}
extern "C"
void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt)
{
dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
dim3 tids(TIDSX, TIDSY);
updateTexture(v, DIM*sizeof(cData), DIM, tPitch);
hipLaunchKernelGGL(( advectVelocity_k), dim3(grid), dim3(tids), 0, 0, v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY, texObj);
getLastCudaError("advectVelocity_k failed.");
}
extern "C"
void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc)
{
// Forward FFT
checkCudaErrors(hipfftExecR2C(planr2c, (hipfftReal *)vx, (hipfftComplex *)vx));
checkCudaErrors(hipfftExecR2C(planr2c, (hipfftReal *)vy, (hipfftComplex *)vy));
uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1),
(dy/TILEY)+(!(dy%TILEY)?0:1), 1);
uint3 tids = make_uint3(TIDSX, TIDSY, 1);
hipLaunchKernelGGL(( diffuseProject_k), dim3(grid), dim3(tids), 0, 0, vx, vy, dx, dy, dt, visc, TILEY/TIDSY);
getLastCudaError("diffuseProject_k failed.");
// Inverse FFT
checkCudaErrors(hipfftExecC2R(planc2r, (hipfftComplex *)vx, (hipfftReal *)vx));
checkCudaErrors(hipfftExecC2R(planc2r, (hipfftComplex *)vy, (hipfftReal *)vy));
}
extern "C"
void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy)
{
dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
dim3 tids(TIDSX, TIDSY);
hipLaunchKernelGGL(( updateVelocity_k), dim3(grid), dim3(tids), 0, 0, v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch);
getLastCudaError("updateVelocity_k failed.");
}
extern "C"
void advectParticles(GLuint vbo, cData *v, int dx, int dy, float dt)
{
dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
dim3 tids(TIDSX, TIDSY);
cData *p;
checkCudaErrors(hipGraphicsMapResources(1, &cuda_vbo_resource, 0));
getLastCudaError("hipGraphicsMapResources failed");
size_t num_bytes;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&p, &num_bytes,
cuda_vbo_resource));
getLastCudaError("hipGraphicsResourceGetMappedPointer failed");
hipLaunchKernelGGL(( advectParticles_k), dim3(grid), dim3(tids), 0, 0, p, v, dx, dy, dt, TILEY/TIDSY, tPitch);
getLastCudaError("advectParticles_k failed.");
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
| 34978e63d254a862e19c1440e2a567bfac899a6e.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cufft.h> // CUDA FFT Libraries
#include <helper_cuda.h> // Helper functions for CUDA Error handling
// OpenGL Graphics includes
#include <GLES3/gl31.h>
// FluidsGLES CUDA kernel definitions
#include "fluidsGLES_kernels.cuh"
// Texture object for reading velocity field
cudaTextureObject_t texObj;
static cudaArray *array = NULL;
// Particle data
extern GLuint vbo; // OpenGL vertex buffer object
extern struct cudaGraphicsResource *cuda_vbo_resource; // handles OpenGL-CUDA exchange
// Texture pitch
extern size_t tPitch;
extern cufftHandle planr2c;
extern cufftHandle planc2r;
cData *vxfield = NULL;
cData *vyfield = NULL;
void setupTexture(int x, int y)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>();
cudaMallocArray(&array, &desc, y, x);
getLastCudaError("cudaMalloc failed");
cudaResourceDesc texRes;
memset(&texRes,0,sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = array;
cudaTextureDesc texDescr;
memset(&texDescr,0,sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeWrap;
texDescr.readMode = cudaReadModeElementType;
checkCudaErrors(cudaCreateTextureObject(&texObj, &texRes, &texDescr, NULL));
}
void updateTexture(cData *data, size_t wib, size_t h, size_t pitch)
{
checkCudaErrors(cudaMemcpy2DToArray(array, 0, 0, data, pitch, wib, h, cudaMemcpyDeviceToDevice));
}
void deleteTexture(void)
{
checkCudaErrors(cudaDestroyTextureObject(texObj));
checkCudaErrors(cudaFreeArray(array));
}
// Note that these kernels are designed to work with arbitrary
// domain sizes, not just domains that are multiples of the tile
// size. Therefore, we have extra code that checks to make sure
// a given thread location falls within the domain boundaries in
// both X and Y. Also, the domain is covered by looping over
// multiple elements in the Y direction, while there is a one-to-one
// mapping between threads in X and the tile size in X.
// Nolan Goodnight 9/22/06
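// A concrete (hypothetical) illustration of the mapping described above: with
// TILEX = TILEY = 64, TIDSX = 64 and TIDSY = 4, each block owns a 64x64 tile,
// blockDim.x maps one-to-one onto the tile in X, and every thread loops over
// lb = TILEY/TIDSY = 16 consecutive rows in Y. The gtidx/fi range checks in the
// kernels below are what keep this correct when dx or dy is not a multiple of 64.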
// This method adds constant force vectors to the velocity field
// stored in 'v' according to v(x,t+1) = v(x,t) + dt * f.
__global__ void
addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
cData *fj = (cData *)((char *)v + (ty + spy) * pitch) + tx + spx;
cData vterm = *fj;
tx -= r;
ty -= r;
float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty);
vterm.x += s * fx;
vterm.y += s * fy;
*fj = vterm;
}
// This method performs the velocity advection step, where we
// trace velocity vectors back in time to update each grid cell.
// That is, v(x,t+1) = v(p(x,-dt),t). Here we perform bilinear
// interpolation in the velocity space.
__global__ void
advectVelocity_k(cData *v, float *vx, float *vy,
int dx, int pdx, int dy, float dt, int lb, cudaTextureObject_t texObject)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
cData vterm, ploc;
float vxterm, vyterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * pdx + gtidx;
vterm = tex2D<cData>(texObject, (float)gtidx, (float)fi);
ploc.x = (gtidx + 0.5f) - (dt * vterm.x * dx);
ploc.y = (fi + 0.5f) - (dt * vterm.y * dy);
vterm = tex2D<cData>(texObject, ploc.x, ploc.y);
vxterm = vterm.x;
vyterm = vterm.y;
vx[fj] = vxterm;
vy[fj] = vyterm;
}
}
}
}
// This method performs velocity diffusion and forces mass conservation
// in the frequency domain. The inputs 'vx' and 'vy' are complex-valued
// arrays holding the Fourier coefficients of the velocity field in
// X and Y. Diffusion in this space takes a simple form described as:
// v(k,t) = v(k,t) / (1 + visc * dt * k^2), where visc is the viscosity,
// and k is the wavenumber. The projection step forces the Fourier
// velocity vectors to be orthogonal to the vectors for each
// wavenumber: v(k,t) = v(k,t) - ((k dot v(k,t) * k) / k^2.
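// Worked example for one (hypothetical) wavenumber k = (3, 4): k^2 = 25, so the
// diffusion step scales both Fourier coefficients by 1 / (1 + visc * dt * 25),
// and the projection step then subtracts ((k . v) / 25) * k from v, leaving only
// the component of v orthogonal to k, i.e. the divergence-free part.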
__global__ void
diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt,
float visc, int lb)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
cData xterm, yterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * dx + gtidx;
xterm = vx[fj];
yterm = vy[fj];
// Compute the index of the wavenumber based on the
// data order produced by a standard NN FFT.
int iix = gtidx;
int iiy = (fi>dy/2)?(fi-(dy)):fi;
// Velocity diffusion
float kk = (float)(iix * iix + iiy * iiy); // k^2
float diff = 1.f / (1.f + visc * dt * kk);
xterm.x *= diff;
xterm.y *= diff;
yterm.x *= diff;
yterm.y *= diff;
// Velocity projection
if (kk > 0.f)
{
float rkk = 1.f / kk;
// Real portion of velocity projection
float rkp = (iix * xterm.x + iiy * yterm.x);
// Imaginary portion of velocity projection
float ikp = (iix * xterm.y + iiy * yterm.y);
xterm.x -= rkk * rkp * iix;
xterm.y -= rkk * ikp * iix;
yterm.x -= rkk * rkp * iiy;
yterm.y -= rkk * ikp * iiy;
}
vx[fj] = xterm;
vy[fj] = yterm;
}
}
}
}
// This method updates the velocity field 'v' using the two complex
// arrays from the previous step: 'vx' and 'vy'. Here we scale the
// real components by 1/(dx*dy) to account for an unnormalized FFT.
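// (cuFFT transforms are unnormalized: a forward R2C followed by an inverse C2R
// returns the input scaled by the number of elements, here dx * dy, which is why
// that factor is divided out below.)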
__global__ void
updateVelocity_k(cData *v, float *vx, float *vy,
int dx, int pdx, int dy, int lb, size_t pitch)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
float vxterm, vyterm;
cData nvterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fjr = fi * pdx + gtidx;
vxterm = vx[fjr];
vyterm = vy[fjr];
// Normalize the result of the inverse FFT
float scale = 1.f / (dx * dy);
nvterm.x = vxterm * scale;
nvterm.y = vyterm * scale;
cData *fj = (cData *)((char *)v + fi * pitch) + gtidx;
*fj = nvterm;
}
} // If this thread is inside the domain in Y
} // If this thread is inside the domain in X
}
// This method updates the particles by moving particle positions
// according to the velocity field and time step. That is, for each
// particle: p(t+1) = p(t) + dt * v(p(t)).
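// The repeated "x = x - (int)x; x += 1.f; x = x - (int)x;" statements below wrap
// the normalized particle coordinates back into [0, 1), i.e. periodic boundaries.
// For example, a coordinate of 1.3 wraps to 0.3 and -0.2 wraps to 0.8.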
__global__ void
advectParticles_k(cData *part, cData *v, int dx, int dy,
float dt, int lb, size_t pitch)
{
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
// gtidx is the domain location in x for this thread
cData pterm, vterm;
if (gtidx < dx)
{
for (p = 0; p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * dx + gtidx;
pterm = part[fj];
int xvi = ((int)(pterm.x * dx));
int yvi = ((int)(pterm.y * dy));
vterm = *((cData *)((char *)v + yvi * pitch) + xvi);
pterm.x += dt * vterm.x;
pterm.x = pterm.x - (int)pterm.x;
pterm.x += 1.f;
pterm.x = pterm.x - (int)pterm.x;
pterm.y += dt * vterm.y;
pterm.y = pterm.y - (int)pterm.y;
pterm.y += 1.f;
pterm.y = pterm.y - (int)pterm.y;
part[fj] = pterm;
}
} // If this thread is inside the domain in Y
} // If this thread is inside the domain in X
}
// These are the external function calls necessary for launching fluid simulation
extern "C"
void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r)
{
dim3 tids(2*r+1, 2*r+1);
addForces_k<<<1, tids>>>(v, dx, dy, spx, spy, fx, fy, r, tPitch);
getLastCudaError("addForces_k failed.");
}
extern "C"
void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt)
{
dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
dim3 tids(TIDSX, TIDSY);
updateTexture(v, DIM*sizeof(cData), DIM, tPitch);
advectVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY, texObj);
getLastCudaError("advectVelocity_k failed.");
}
extern "C"
void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc)
{
// Forward FFT
checkCudaErrors(cufftExecR2C(planr2c, (cufftReal *)vx, (cufftComplex *)vx));
checkCudaErrors(cufftExecR2C(planr2c, (cufftReal *)vy, (cufftComplex *)vy));
uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1),
(dy/TILEY)+(!(dy%TILEY)?0:1), 1);
uint3 tids = make_uint3(TIDSX, TIDSY, 1);
diffuseProject_k<<<grid, tids>>>(vx, vy, dx, dy, dt, visc, TILEY/TIDSY);
getLastCudaError("diffuseProject_k failed.");
// Inverse FFT
checkCudaErrors(cufftExecC2R(planc2r, (cufftComplex *)vx, (cufftReal *)vx));
checkCudaErrors(cufftExecC2R(planc2r, (cufftComplex *)vy, (cufftReal *)vy));
}
extern "C"
void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy)
{
dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
dim3 tids(TIDSX, TIDSY);
updateVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch);
getLastCudaError("updateVelocity_k failed.");
}
extern "C"
void advectParticles(GLuint vbo, cData *v, int dx, int dy, float dt)
{
dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1));
dim3 tids(TIDSX, TIDSY);
cData *p;
checkCudaErrors(cudaGraphicsMapResources(1, &cuda_vbo_resource, 0));
getLastCudaError("cudaGraphicsMapResources failed");
size_t num_bytes;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&p, &num_bytes,
cuda_vbo_resource));
getLastCudaError("cudaGraphicsResourceGetMappedPointer failed");
advectParticles_k<<<grid, tids>>>(p, v, dx, dy, dt, TILEY/TIDSY, tPitch);
getLastCudaError("advectParticles_k failed.");
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
|
c6ee375b00055775b8dc20fc0e1b3025cfa31c88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) {
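// im2col: based on the index decoding below, each of the n threads handles one
// (channel_in, h_out, w_out) triple (n = channels * height_col * width_col) and
// unrolls its ksize x ksize input patch into one column segment of data_col, so
// that the convolution can afterwards be evaluated as a plain matrix multiply.
// Taps that land in the zero-padding region (h or w outside the image) write 0.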
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
} | c6ee375b00055775b8dc20fc0e1b3025cfa31c88.cu | #include "includes.h"
__global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
} |
ddf6a0c4e5772edfff5519e4248df0ef2987f83d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
__global__ void inclusive(const double *d_in,
double* d_out,
size_t size,
int inc) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < size) {
if (idx >= inc) {
d_out[idx] = d_in[idx] + d_in[idx - inc];
} else {
d_out[idx] = d_in[idx];
}
}
}
__global__ void subtract(double *d_scan,
double* d_diff,
size_t size) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < size) {
d_scan[idx] = d_scan[idx] - d_diff[idx];
}
}
void inclusive_gpu(double *d_in, double *d_out, size_t size) {
int inc = 1;
while(inc <= size) {
hipLaunchKernelGGL(( inclusive), dim3(1), dim3(size), 0, 0, d_in, d_out, size, inc);
hipDeviceSynchronize();
hipMemcpy(d_in, d_out, sizeof(double) * size, hipMemcpyDeviceToDevice);
inc *= 2;
}
}
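// The loop above is a Hillis-Steele inclusive scan: each pass adds the element
// `inc` positions to the left and doubles `inc` until it reaches the array size.
// Hypothetical trace for {3, 1, 7, 0}: inc=1 -> {3, 4, 8, 7}, inc=2 -> {3, 4, 11, 11},
// which is the inclusive prefix sum. exclusive_scan() below then recovers the
// exclusive form as "inclusive minus original input", giving {0, 3, 4, 11}. Note
// that the kernels are launched with a single block, so this scheme assumes
// `size` does not exceed the maximum number of threads per block.
//
// Usage sketch (hypothetical):
//   std::vector<double> v = {3, 1, 7, 0};
//   auto inc_res = inclusive_scan(v);   // {3, 4, 11, 11}
//   auto exc_res = exclusive_scan(v);   // {0, 3, 4, 11}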
std::vector<double> exclusive_scan(std::vector<double> &in) {
double *d_in, *d_out, *d_diff;
std::vector<double> h_out(in.size());
hipMalloc((void**)&d_in, sizeof(double) * in.size());
hipMalloc((void**)&d_out, sizeof(double) * in.size());
hipMalloc((void**)&d_diff, sizeof(double) * in.size());
hipMemcpy(&d_in[0], &in[0], sizeof(double) * in.size(), hipMemcpyHostToDevice);
hipMemcpy(&d_diff[0], &d_in[0], sizeof(double) * in.size(), hipMemcpyDeviceToDevice);
inclusive_gpu(d_in, d_out, in.size());
hipLaunchKernelGGL(( subtract), dim3(1), dim3(in.size()), 0, 0, d_out, d_diff, in.size());
hipMemcpy(&h_out[0], d_out, sizeof(double) * in.size(), hipMemcpyDeviceToHost);
return h_out;
}
std::vector<double> inclusive_scan(std::vector<double> &in) {
double *d_in, *d_out;
std::vector<double> h_out(in.size());
hipMalloc((void**)&d_in, sizeof(double) * in.size());
hipMalloc((void**)&d_out, sizeof(double) * in.size());
hipMemcpy(&d_in[0], &in[0], sizeof(double) * in.size(), hipMemcpyHostToDevice);
inclusive_gpu(d_in, d_out, in.size());
hipMemcpy(&h_out[0], d_out, sizeof(double) * in.size(), hipMemcpyDeviceToHost);
return h_out;
} | ddf6a0c4e5772edfff5519e4248df0ef2987f83d.cu | #include "kernels.h"
__global__ void inclusive(const double *d_in,
double* d_out,
size_t size,
int inc) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < size) {
if (idx >= inc) {
d_out[idx] = d_in[idx] + d_in[idx - inc];
} else {
d_out[idx] = d_in[idx];
}
}
}
__global__ void subtract(double *d_scan,
double* d_diff,
size_t size) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < size) {
d_scan[idx] = d_scan[idx] - d_diff[idx];
}
}
void inclusive_gpu(double *d_in, double *d_out, size_t size) {
int inc = 1;
while(inc <= size) {
inclusive<<<1, size>>>(d_in, d_out, size, inc);
cudaDeviceSynchronize();
cudaMemcpy(d_in, d_out, sizeof(double) * size, cudaMemcpyDeviceToDevice);
inc *= 2;
}
}
std::vector<double> exclusive_scan(std::vector<double> &in) {
double *d_in, *d_out, *d_diff;
std::vector<double> h_out(in.size());
cudaMalloc((void**)&d_in, sizeof(double) * in.size());
cudaMalloc((void**)&d_out, sizeof(double) * in.size());
cudaMalloc((void**)&d_diff, sizeof(double) * in.size());
cudaMemcpy(&d_in[0], &in[0], sizeof(double) * in.size(), cudaMemcpyHostToDevice);
cudaMemcpy(&d_diff[0], &d_in[0], sizeof(double) * in.size(), cudaMemcpyDeviceToDevice);
inclusive_gpu(d_in, d_out, in.size());
subtract<<<1, in.size()>>>(d_out, d_diff, in.size());
cudaMemcpy(&h_out[0], d_out, sizeof(double) * in.size(), cudaMemcpyDeviceToHost);
return h_out;
}
std::vector<double> inclusive_scan(std::vector<double> &in) {
double *d_in, *d_out;
std::vector<double> h_out(in.size());
cudaMalloc((void**)&d_in, sizeof(double) * in.size());
cudaMalloc((void**)&d_out, sizeof(double) * in.size());
cudaMemcpy(&d_in[0], &in[0], sizeof(double) * in.size(), cudaMemcpyHostToDevice);
inclusive_gpu(d_in, d_out, in.size());
cudaMemcpy(&h_out[0], d_out, sizeof(double) * in.size(), cudaMemcpyDeviceToHost);
return h_out;
} |
9cd778e2ee33e7ca78b6d3625ee5a70acb776e50.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef FUNCTION_CU
#define FUNCTION_CU 1
#include "function.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
__device__ __host__ int AddrLookup<Size3D, Point3D>::look(Point3D point) const {
return (point.x +
(point.y * s.width) +
(point.z * s.width * s.height));
}
/*
* Returns the weight of a cube vertex for the 3D interpolation.
* The cube has 8 points, they can be addressed with an {0,1}^3 vector.
* 1D interpolation: x0=0, x1=1; we have a value phi(x0) and phi(x1).
* We want to compute the first approximation of phi(x): ~phi(x).
* (Classic linear interpolation). Find the line l(_x)=a*_x+b that fulfils the
* Requirement: l(x0)=phi(x0) and l(x1)=phi(x1), so:
* b=phi(x0)
* a=dy/dx=[phi(x1)-phi(x0)]/(x1-x0)
* So: l(x)={[phi(x1)-phi(x0)]/(x1-x0)}*x+phi(x0)
* Since x1=1 and x0=0: l(x)=[phi(x1)-phi(x0)]*x+phi(x0)
* We want to expess the l(x) in terms of x0 and x1, so find the weights of x0 and x1!
* [phi(x1)-phi(x0)]*x+phi(x0)=t1*phi(x0)+t2*phi(x1)
* phi(x1)*x - phi(x0)*x + phi(x0)=t1*phi(x0)+t2*phi(x1)
*
* Then we have:
* 1) phi(x1)*x = t2*phi(x1)
* 2) - phi(x0)*x + phi(x0) = t1*phi(x0)
*
* From the 1): t2=x
* From the 2):
* - phi(x0)*x + phi(x0)
* phi(x0)*(-1*x + 1)
* t1=(-1*x + 1)=1-x
*
* So: we get the interpolated value ~phi(x)=e(x)=t1*phi(x0)+t2*phi(x1)=(1-x)*phi(x0)+x*phi(x1)
*
* That means, if we want to compute the interpolated value in x (between the point x0 and x1), we have to count
* the value at x0 with weight (1-x) and x1 with x.
* Weights of x0 and x1:
* x0 -> 1-x
* x1 -> x
*
* The edge endpoint is indexed with 0 (x0) and 1 (x1):
* 0 -> 1-x
* 1 -> x
*
* A function that returns the edge weight for the endpoints:
* w(p, x) <- -x+1 + p*(2x-1)
*
* That is: w(0,x) returns -x+1 and w(1,x) returns x.
*
* In 2D:
* (0,0) -> (1-x,1-y)
* (0,1) -> (1-x,y)
* (1,0) -> (x,1-y)
* (1,1) -> (x,y)
*
* ~phi(x,y) = phi(0,0)*(1-x)*(1-y) + phi(0,1)*(1-x)*y + phi(1,0)*x*(1-y) + phi(1,1)*x*y
* So, in 2D we have to compute the area of the squares, and for this reason, we have to
* multiply the elements of the resulting pairs.
*
* The linear interpolation can be easily extended to 3D and the w function becomes:
* w(p,x) <- mul(-x+ONES(3) + hp(p, 2*x-ONES(3))),
*
* where the hp is the Hadamard (pointwise) product of vectors,
* and mul multiplies the elements of a vector.
*
* Params: cubeVertex=p, cubePoint=x
*/
__device__ __host__ float getVertexWeight(int3 cubeVertex, float3 localCubePt){
float3 mxp1 = (-1*localCubePt)+FONES3;
float3 res =
(mxp1 +
hp(
make_float3(cubeVertex),
((2*localCubePt)-FONES3)
)
);
return mul(res);
}
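// Numeric check of the weight formula: for a local cube point x = (0.25, 0.5, 0.75),
// the vertex p = (1, 0, 1) gets weight 0.25 * (1 - 0.5) * 0.75 = 0.09375, and the
// eight vertex weights always sum to 1, so interpolate3D() below returns a convex
// combination of the eight corner values.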
/*
* Interpolates the value of a point in 3D based on the function passed in the parameter func.
* pt: the global coordinate of the point.
* First, the function determines the reference point cubeRefPt and trilinearly interpolates the values
* to pt from the eight points cubeRefPt+{0,1}^3 with the appropriate weights.
*/
__device__ __host__ float interpolate3D(const float3 pt, const Func3D<float>& func){
int3 cubeRefPt = make_int3(pt);
float3 localCubePt = pt-make_float3(cubeRefPt);
float result = 0.0f;
for(int i = 0; i <=1; i++){
for(int j = 0; j <= 1; j++){
for(int k = 0; k <= 1; k++){
int3 cubeVertexOff = make_int3(i, j, k);
float vertexWeight = getVertexWeight(cubeVertexOff, localCubePt);
result += vertexWeight*func[Point3D(cubeRefPt+cubeVertexOff)];
}
}
}
return result;
}
__device__ __host__ float3 interpolate3D(const float3 pt, const Func3D<float3>& func){
int3 cubeRefPt = make_int3(pt);
float3 localCubePt = pt-make_float3(cubeRefPt);
float3 result = FZEROS3;
for(int i = 0; i <=1; i++){
for(int j = 0; j <= 1; j++){
for(int k = 0; k <= 1; k++){
int3 cubeVertexOff = make_int3(i, j, k);
float vertexWeight = getVertexWeight(cubeVertexOff, localCubePt);
result += vertexWeight*func[Point3D(cubeRefPt+cubeVertexOff)];
}
}
}
return result;
}
#endif
| 9cd778e2ee33e7ca78b6d3625ee5a70acb776e50.cu | #ifndef FUNCTION_CU
#define FUNCTION_CU 1
#include "function.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
__device__ __host__ int AddrLookup<Size3D, Point3D>::look(Point3D point) const {
return (point.x +
(point.y * s.width) +
(point.z * s.width * s.height));
}
/*
* Returns the weight of a cube vertex for the 3D interpolation.
* The cube has 8 points, they can be addressed with an {0,1}^3 vector.
* 1D interpolation: x0=0, x1=1; we have a value phi(x0) and phi(x1).
* We want to compute the first approximation of phi(x): ~phi(x).
* (Classic linear interpolation). Find the line l(_x)=a*_x+b that fulfils the
* Requirement: l(x0)=phi(x0) and l(x1)=phi(x1), so:
* b=phi(x0)
* a=dy/dx=[phi(x1)-phi(x0)]/(x1-x0)
* So: l(x)={[phi(x1)-phi(x0)]/(x1-x0)}*x+phi(x0)
* Since x1=1 and x0=0: l(x)=[phi(x1)-phi(x0)]*x+phi(x0)
* We want to expess the l(x) in terms of x0 and x1, so find the weights of x0 and x1!
* [phi(x1)-phi(x0)]*x+phi(x0)=t1*phi(x0)+t2*phi(x1)
* phi(x1)*x - phi(x0)*x + phi(x0)=t1*phi(x0)+t2*phi(x1)
*
* Then we have:
* 1) phi(x1)*x = t2*phi(x1)
* 2) - phi(x0)*x + phi(x0) = t1*phi(x0)
*
* From the 1): t2=x
* From the 2):
* - phi(x0)*x + phi(x0)
* phi(x0)*(-1*x + 1)
* t1=(-1*x + 1)=1-x
*
* So: we get the interpolated value ~phi(x)=e(x)=t1*phi(x0)+t2*phi(x1)=(1-x)*phi(x0)+x*phi(x1)
*
* That means, if we want to compute the interpolated value in x (between the point x0 and x1), we have to count
* the value at x0 with weight (1-x) and x1 with x.
* Weights of x0 and x1:
* x0 -> 1-x
* x1 -> x
*
* The edge endpoint is indexed with 0 (x0) and 1 (x1):
* 0 -> 1-x
* 1 -> x
*
* A function that returns the edge weight for the endpoints:
* w(p, x) <- -x+1 + p*(2x-1)
*
* That is: w(0,x) returns -x+1 and w(1,x) returns x.
*
* In 2D:
* (0,0) -> (1-x,1-y)
* (0,1) -> (1-x,y)
* (1,0) -> (x,1-y)
* (1,1) -> (x,y)
*
* ~phi(x,y) = phi(0,0)*(1-x)*(1-y) + phi(0,1)*(1-x)*y + phi(1,0)*x*(1-y) + phi(1,1)*x*y
* So, in 2D we have to compute the area of the squares, and for this reason, we have to
* multiply the elements of the resulting pairs.
*
* The linear interpolation can be easily extended to 3D and the w function becomes:
* w(p,x) <- mul(-x+ONES(3) + hp(p, 2*x-ONES(3))),
*
* where the hp is the Hadamard (pointwise) product of vectors,
* and mul multiplies the elements of a vector.
*
* Params: cubeVertex=p, cubePoint=x
*/
__device__ __host__ float getVertexWeight(int3 cubeVertex, float3 localCubePt){
float3 mxp1 = (-1*localCubePt)+FONES3;
float3 res =
(mxp1 +
hp(
make_float3(cubeVertex),
((2*localCubePt)-FONES3)
)
);
return mul(res);
}
/*
* Interpolates the value of a point in 3D based on the function passed in the parameter func.
* pt: the global coordinate of the point.
* First, the function determines the reference point cubeRefPt and trilinearly interpolates the values
* to pt from the eight points cubeRefPt+{0,1}^3 with the appropriate weights.
*/
__device__ __host__ float interpolate3D(const float3 pt, const Func3D<float>& func){
int3 cubeRefPt = make_int3(pt);
float3 localCubePt = pt-make_float3(cubeRefPt);
float result = 0.0f;
for(int i = 0; i <=1; i++){
for(int j = 0; j <= 1; j++){
for(int k = 0; k <= 1; k++){
int3 cubeVertexOff = make_int3(i, j, k);
float vertexWeight = getVertexWeight(cubeVertexOff, localCubePt);
result += vertexWeight*func[Point3D(cubeRefPt+cubeVertexOff)];
}
}
}
return result;
}
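// Sanity note: since each axis contributes either x_i or (1 - x_i) to a vertex
// weight, summing over all eight vertices factors as (x+(1-x))*(y+(1-y))*(z+(1-z)) = 1,
// so a constant field is reproduced exactly by this trilinear interpolation.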
__device__ __host__ float3 interpolate3D(const float3 pt, const Func3D<float3>& func){
int3 cubeRefPt = make_int3(pt);
float3 localCubePt = pt-make_float3(cubeRefPt);
float3 result = FZEROS3;
for(int i = 0; i <=1; i++){
for(int j = 0; j <= 1; j++){
for(int k = 0; k <= 1; k++){
int3 cubeVertexOff = make_int3(i, j, k);
float vertexWeight = getVertexWeight(cubeVertexOff, localCubePt);
result += vertexWeight*func[Point3D(cubeRefPt+cubeVertexOff)];
}
}
}
return result;
}
#endif
|
d84321bc97114becf8ea8db79d0a3ad9713979e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "brick-cuda.h"
#include "head.h"
#include "headcu.h"
#define out(i, j, k) out_arr[k][j][i]
#define in(i, j, k) in_arr[k][j][i]
__global__ void
arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) {
auto in_arr = (bElem (*)[STRIDE][STRIDE]) in_ptr;
auto out_arr = (bElem (*)[STRIDE][STRIDE]) out_ptr;
#include "arrcusched.h"
{
#include "kernel.h"
}
}
#undef out
#undef in
__global__ void
brick_kernel(unsigned (*grid)[STRIDE/TILEJ][STRIDE/TILEI], Brick3D in, Brick3D out, bElem *c) {
#include "bricusched.h"
brick("kernel.py", BVEC, (TILEK, TILEJ, TILEI), (BFOLD), b);
}
int main() {
// allocations
bElem *c = randomArray({25});
bElem *c_dev;
copyToDevice({25}, c_dev, c);
auto in_arr = randomArray({STRIDE, STRIDE, STRIDE});
bElem *in_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, in_dev, in_arr);
auto out_arr = zeroArray({STRIDE, STRIDE, STRIDE});
bElem *out_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, out_dev, out_arr);
{
auto compute = [&]() -> void {
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(_TILEI, _TILEJ, _TILEK);
hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread) , 0, 0, in_dev, out_dev, c_dev);
};
#ifndef TYPE
#include "cutiming.h"
#else
compute();
#endif
copyFromDevice({STRIDE, STRIDE, STRIDE}, out_arr, out_dev);
}
#if TYPE == 1
{
unsigned *grid_ptr;
unsigned bSize = TILEK * TILEJ * TILEI;
auto bInfo = init_grid<3>(grid_ptr, {STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI});
unsigned *grid_dev;
copyToDevice({STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr);
auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize);
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
hipMalloc(&bInfo_dev, size);
hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice);
}
copyBrick<3>({STRIDE, STRIDE, STRIDE}, in_arr, grid_ptr, in_bri);
BrickStorage *bStorage_dev;
BrickStorage _bStorage_dev = movBrickStorage(bStorage, hipMemcpyHostToDevice);
{
unsigned size = sizeof(BrickStorage);
hipMalloc(&bStorage_dev, size);
hipMemcpy(bStorage_dev, &_bStorage_dev, size, hipMemcpyHostToDevice);
}
auto compute = [&]() -> void {
Brick3D bIn(bInfo_dev, &_bStorage_dev, 0);
Brick3D bOut(bInfo_dev, &_bStorage_dev, bSize);
bIn.bStorage = bStorage_dev;
bOut.bStorage = bStorage_dev;
auto grid = (unsigned (*)[STRIDE/TILEJ][STRIDE/TILEI]) grid_dev;
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(32);
hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread) , 0, 0, grid, bIn, bOut, c_dev);
};
#include "cutiming.h"
hipDeviceSynchronize();
hipMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), hipMemcpyDeviceToHost);
if (!compareBrick<3>({STRIDE, STRIDE, STRIDE}, out_arr, grid_ptr, out_bri))
return 1;
}
#endif
return 0;
}
| d84321bc97114becf8ea8db79d0a3ad9713979e6.cu | #include "brick-cuda.h"
#include "head.h"
#include "headcu.h"
#define out(i, j, k) out_arr[k][j][i]
#define in(i, j, k) in_arr[k][j][i]
__global__ void
arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) {
auto in_arr = (bElem (*)[STRIDE][STRIDE]) in_ptr;
auto out_arr = (bElem (*)[STRIDE][STRIDE]) out_ptr;
#include "arrcusched.h"
{
#include "kernel.h"
}
}
#undef out
#undef in
__global__ void
brick_kernel(unsigned (*grid)[STRIDE/TILEJ][STRIDE/TILEI], Brick3D in, Brick3D out, bElem *c) {
#include "bricusched.h"
brick("kernel.py", BVEC, (TILEK, TILEJ, TILEI), (BFOLD), b);
}
int main() {
// allocations
bElem *c = randomArray({25});
bElem *c_dev;
copyToDevice({25}, c_dev, c);
auto in_arr = randomArray({STRIDE, STRIDE, STRIDE});
bElem *in_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, in_dev, in_arr);
auto out_arr = zeroArray({STRIDE, STRIDE, STRIDE});
bElem *out_dev;
copyToDevice({STRIDE, STRIDE, STRIDE}, out_dev, out_arr);
{
auto compute = [&]() -> void {
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(_TILEI, _TILEJ, _TILEK);
arr_kernel<<< block, thread >>>(in_dev, out_dev, c_dev);
};
#ifndef TYPE
#include "cutiming.h"
#else
compute();
#endif
copyFromDevice({STRIDE, STRIDE, STRIDE}, out_arr, out_dev);
}
#if TYPE == 1
{
unsigned *grid_ptr;
unsigned bSize = TILEK * TILEJ * TILEI;
auto bInfo = init_grid<3>(grid_ptr, {STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI});
unsigned *grid_dev;
copyToDevice({STRIDE/TILEK, STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr);
auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0);
Brick<Dim<TILEK, TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize);
BrickInfo<3> *bInfo_dev;
auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickInfo<3>);
cudaMalloc(&bInfo_dev, size);
cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice);
}
copyBrick<3>({STRIDE, STRIDE, STRIDE}, in_arr, grid_ptr, in_bri);
BrickStorage *bStorage_dev;
BrickStorage _bStorage_dev = movBrickStorage(bStorage, cudaMemcpyHostToDevice);
{
unsigned size = sizeof(BrickStorage);
cudaMalloc(&bStorage_dev, size);
cudaMemcpy(bStorage_dev, &_bStorage_dev, size, cudaMemcpyHostToDevice);
}
auto compute = [&]() -> void {
Brick3D bIn(bInfo_dev, &_bStorage_dev, 0);
Brick3D bOut(bInfo_dev, &_bStorage_dev, bSize);
bIn.bStorage = bStorage_dev;
bOut.bStorage = bStorage_dev;
auto grid = (unsigned (*)[STRIDE/TILEJ][STRIDE/TILEI]) grid_dev;
dim3 block(N/TILEI, N/TILEJ, N/TILEK), thread(32);
brick_kernel<<< block, thread >>>(grid, bIn, bOut, c_dev);
};
#include "cutiming.h"
cudaDeviceSynchronize();
cudaMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), cudaMemcpyDeviceToHost);
if (!compareBrick<3>({STRIDE, STRIDE, STRIDE}, out_arr, grid_ptr, out_bri))
return 1;
}
#endif
return 0;
}
|
5daefcd8966c58f3351f073bdbbb2bca8016ac82.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "TestFillEnsemble.h"
#include "TestPgcd.h"
#include "TestStructure.h"
int main(){
int val = 62;
int valx = 44;
int valy = 32;
/*testInitEns();
printf("Test InitEns -> passed\n");
testAddVal(val);
printf("Test AddVal -> passed\n");
testAddCouple(valx,valy);
printf("Test AddCouple -> passed\n");
TestIsBSmooth();
printf("Test BSmooth -> passed\n");
TestIsInEnsemble();
printf("Test IsInEnsemble -> passed\n");
TestIsInf();
printf("Test isinf -> passed\n");
TestIsBSmoothG();
printf("Test BSmoothG -> passed\n");
TestIsInEnsembleG();
printf("Test IsInEnsembleG -> passed\n");
TestfillEnsemble();
printf("Test fillEnsemble -> passed\n");*/
TestfillEnsembleG();
printf("Test fillEnsembleG -> passed\n");
TestPgcd();
printf("Test Pgcd -> passed\n");
return 0;
}
| 5daefcd8966c58f3351f073bdbbb2bca8016ac82.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "TestFillEnsemble.h"
#include "TestPgcd.h"
#include "TestStructure.h"
int main(){
int val = 62;
int valx = 44;
int valy = 32;
/*testInitEns();
printf("Test InitEns -> passed\n");
testAddVal(val);
printf("Test AddVal -> passed\n");
testAddCouple(valx,valy);
printf("Test AddCouple -> passed\n");
TestIsBSmooth();
printf("Test BSmooth -> passed\n");
TestIsInEnsemble();
printf("Test IsInEnsemble -> passed\n");
TestIsInf();
printf("Test isinf -> passed\n");
TestIsBSmoothG();
printf("Test BSmoothG -> passed\n");
TestIsInEnsembleG();
printf("Test IsInEnsembleG -> passed\n");
TestfillEnsemble();
printf("Test fillEnsemble -> passed\n");*/
TestfillEnsembleG();
printf("Test fillEnsembleG -> passed\n");
TestPgcd();
printf("Test Pgcd -> passed\n");
return 0;
}
|
printDeviceSpec.hip | // !!! This is a file automatically generated by hipify!!!
void printDeviceSpec()
{
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++)
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" SharedMemPerBlock: %d\n", prop.sharedMemPerBlock);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
printf(" Total global memory (bits): %d\n", prop.totalGlobalMem);
printf("\n");
}
}
| printDeviceSpec.cu | void printDeviceSpec()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" SharedMemPerBlock: %d\n", prop.sharedMemPerBlock);
printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
printf(" Total global memory (bits): %d\n", prop.totalGlobalMem);
printf("\n");
}
}
|
3ebcbe3e84c3f84d0b73ce2349d997b904302f28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char *s) {
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file) {
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++) {
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file) {
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed) {
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
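// Example with hypothetical values: BLOCK_SIZE = 16 and iteration = 2 give a
// 12x12 "small block" of final results per 16x16 shared-memory tile; the two
// outermost rings of threads only provide halo data for the two in-block time steps.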
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)) {
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
void usage(int argc, char **argv) {
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv) {
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv) {
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);
hipMalloc((void**)&MatrixPower, sizeof(float)*size);
hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
printf("Ending simulation\n");
hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
}
| 3ebcbe3e84c3f84d0b73ce2349d997b904302f28.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
void fatal(char *s) {
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file) {
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++) {
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file) {
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++) {
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed) {
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)) {
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed) {
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
void usage(int argc, char **argv) {
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv) {
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv) {
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2// add one iteration will extend the pyramid base by 2 per each borderline
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
printf("Ending simulation\n");
cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
}
|
c041923e11247833b9eb7cde604e69c52e79bf74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <c10/macros/Macros.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/NumericLimits.cuh>
#include <type_traits>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <ATen/native/hip/PersistentSoftmax.cuh>
namespace at {
namespace native {
namespace {
constexpr int ALIGN_BYTES = 16;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input), logsum(::log(sum)) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - max_input - logsum);
}
const AccumT max_input;
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
__device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input)
, sum(sum) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(::exp(input - max_input) / sum);
}
const AccumT max_input;
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
__device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
// XXX: gradOutput that we get here is really gradOutput * output
// Look for cmul in SoftMax_updateGradInput
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - output * sum);
}
const AccumT sum;
};
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimension:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize inner dimension over y axis and outer over x.
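// For instance (hypothetical shapes), a softmax over dim 1 of an N x C x H x W
// tensor is flattened to outer_size = N, dim_size = C, inner_size = H * W:
// blockDim.x threads cooperate on the reduction over C, blockDim.y/gridDim.y
// tile the H * W inner positions, and gridDim.x tiles the N outer slices.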
inline dim3 SpatialSoftMax_getGridSize(
dim3 block, uint32_t max_active_blocks,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
// First, tile as many blocks as we can over the y axis
uint32_t inner_blocks = (inner_size + block.y - 1) / block.y;
if (inner_blocks > max_active_blocks)
inner_blocks = max_active_blocks;
// Fill the x axis with as many blocks as we can fit (a little more is ok too)
uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks;
if (outer_blocks > outer_size)
outer_blocks = outer_size;
return dim3(outer_blocks, inner_blocks);
}
const int max_threads = 1024;
inline dim3 SpatialSoftMax_getBlockSize(
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
uint32_t inner_threads = inner_size;
inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads));
uint32_t dim_threads = 1;
if (inner_threads <= 64 && dim_size >= 64) {
while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
dim_threads *= 2;
dim_threads /= 2;
}
return dim3(dim_threads, inner_threads);
}
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
Kernel k,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
dim3& grid, dim3& block, uint32_t& smem_size) {
block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
uint32_t block_threads = block.x * block.y;
smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
int max_active_blocks;
#if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION < 305
// HIP function signature is not compatible yet.
uint32_t max_blocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks,
k, block_threads, smem_size);
max_active_blocks = max_blocks;
#else
hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
k, block_threads, smem_size);
#endif
max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
// In the vectorized case we want to trade off allowing more of the buffers to be accessed
// in a vectorized way against wanting a larger block size to get better utilisation.
// In general with ILP you can have (ILP-1)/ILP of the buffer accessed vectorised, at the risk
// of having a very small block size. We choose to keep >= 1/2 of the buffer vectorised while
// allowing a larger block size.
if (ILP > 1) {
max_block_size /= 2;
}
while (block_size < (max_block_size)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = ::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE));
return dim3(block_size);
}
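// E.g. dim_size = 4000 with float input: ILP = sizeof(float4)/sizeof(float) = 4,
// so max_block_size = min(4000/4, 1024)/2 = 500 and the doubling loop settles on
// a 512-thread block.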
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
ReduceOp<T> r;
shared += threadIdx.y * blockDim.x;
__syncthreads();
shared[threadIdx.x] = val;
// NOTE: loop starts with __syncthreads()
int offset = blockDim.x / 2;
while (offset > 0) {
__syncthreads();
if (threadIdx.x < offset)
shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
offset /= 2;
}
__syncthreads();
return shared[0];
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward(
outscalar_t *output, scalar_t *input,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
////////////////////////////////////////////////////////////
// These two blocks are really equivalent, but specializing on
// blockDim.x == 1 makes the kernel faster when the x dimension is unused.
// I didn't want to thread an extra template parameter, and nvcc
// seems to be smart enough to hoist the if outside of the loops.
////////////////////////////////////////////////////////////
if (blockDim.x > 1) {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
} else {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
}
}
}
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward(
scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
// See the comment in forward kernel
if (blockDim.x > 1) {
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += gradOutput[data_offset + d * dim_stride];
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
} else {
accscalar_t sum = 0;
for (uint32_t d = 0; d < dim_size; d++)
sum += gradOutput[data_offset + d * dim_stride];
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = 0; d < dim_size; d++) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + ::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1;
if (threadIdx.x < C10_WARP_SIZE) {
int lane = threadIdx.x % C10_WARP_SIZE;
if (lane < blockDim.x / C10_WARP_SIZE) {
#pragma unroll
for (int i = 0; i < C10_WARP_SIZE; ++i) {
warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]);
}
#ifndef __HIP_PLATFORM_HCC__
__syncwarp(mask);
#endif
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
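// E.g. with blockDim.x == 256 and a 32-wide warp: all 256 threads stash their value in
// smem, lanes 0..7 of the first warp each fold one 32-element chunk
// (smem[lane*32 .. lane*32+31]) back into smem[lane], and thread 0 then folds those 8
// partials into smem[0], which every thread reads after the final __syncthreads().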
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
using LoadT = at::native::memory::aligned_vector<T, ILP>;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
// Handle the unaligned head first, one element per thread, so that the
// vectorized loop below starts at an ALIGN_BYTES boundary.
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Tail: reduce the remaining (non-vectorizable) elements one per thread
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
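// Illustrative sketch only (hypothetical helper, not used by the kernels): the "shift"
// passed to ilpReduce and the writers below is the number of scalar elements a row
// pointer sits past the previous ALIGN_BYTES boundary, e.g. a float row starting 8
// bytes into a 16-byte line gives shift == 2, so two leading elements are consumed
// per-thread before the vectorized loads begin.
inline int ilp_alignment_shift_example(const void* ptr, int element_size) {
  return static_cast<int>((reinterpret_cast<uint64_t>(ptr) % ALIGN_BYTES) / element_size);
}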
/**
* This will apply the Epilogue with vectorized reads & writes when input & output have the same shift
*/
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteFpropResultsVectorized(
int size,
const int shift,
scalar_t *input,
outscalar_t *output,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>;
using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>;
int offset = threadIdx.x;
// if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later
if (shift > 0) {
input -= shift;
output -= shift;
size += shift;
if (threadIdx.x >= shift) {
output[offset] = epilogue(input[offset]);
}
size -= blockDim.x;
input += blockDim.x;
output += blockDim.x;
}
const int last = size % (ILP * blockDim.x);
scalar_t in_v[ILP];
LoadT* in_value = reinterpret_cast<LoadT*>(&in_v);
outscalar_t out_v[ILP];
StoreT* out_value = reinterpret_cast<StoreT*>(&out_v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*in_value = reinterpret_cast<LoadT*>(input)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
out_v[j] = epilogue(in_v[j]);
}
reinterpret_cast<StoreT*>(output)[offset] = *out_value;
}
offset = size - last + threadIdx.x;
// handle the tail
for (; offset < size; offset += blockDim.x) {
output[offset] = epilogue(input[offset]);
}
}
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteBpropResultsVectorized(
int size,
const int shift,
scalar_t *gradInput,
outscalar_t *output,
outscalar_t *gradOutput,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
using gradInputT = at::native::memory::aligned_vector<scalar_t, ILP>;
using outputT = at::native::memory::aligned_vector<outscalar_t, ILP>;
int offset = threadIdx.x;
// if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later
if (shift > 0) {
gradInput -= shift;
output -= shift;
gradOutput -= shift;
size += shift;
if (threadIdx.x >= shift) {
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
size -= blockDim.x;
gradInput += blockDim.x;
output += blockDim.x;
gradOutput += blockDim.x;
}
const int last = size % (ILP * blockDim.x);
scalar_t dX[ILP];
gradInputT *dX_v = reinterpret_cast<gradInputT*>(&dX);
outscalar_t Y[ILP];
outputT *Y_v = reinterpret_cast<outputT*>(&Y);
outscalar_t dY[ILP];
outputT *dY_v = reinterpret_cast<outputT*>(&dY);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*Y_v = reinterpret_cast<outputT*>(output)[offset];
*dY_v = reinterpret_cast<outputT*>(gradOutput)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
dX[j] = epilogue(dY[j], Y[j]);
}
reinterpret_cast<gradInputT*>(gradInput)[offset] = *dX_v;
}
offset = size - last + threadIdx.x;
for (; offset < size; offset += blockDim.x) {
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
}
/**
 * This will apply the Epilogue with non-vectorized reads & writes for the general case
*/
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteFpropResults(
int classes,
scalar_t *input,
outscalar_t *output,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
// Main bulk of loop with ILP
for (; offset < classes - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmp[j] = input[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j) {
output[offset + j * blockDim.x] = epilogue(tmp[j]);
}
}
// Remainder - no ILP
for (; offset < classes; offset += blockDim.x) {
output[offset] = epilogue(input[offset]);
}
}
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteBpropResults(
int classes,
scalar_t *gradInput,
outscalar_t *output,
outscalar_t *gradOutput,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
outscalar_t tmpOutput[ILP];
outscalar_t tmpGradOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpOutput[j] = output[offset + j * blockDim.x];
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j) {
gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
}
}
// Remainder - no ILP
for (; offset < classes; offset += blockDim.x) {
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>;
using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>;
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t);
// find the max
accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
accscalar_t max_k = blockReduce<Max, accscalar_t>(
sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
// reduce all values
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
if (shift == output_shift) {
WriteFpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, input, output, epilogue);
} else {
WriteFpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, input, output, epilogue);
}
}
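// Summary of the kernel above: the standard three-pass softmax over one row,
// m = max_j x_j, s = sum_j exp(x_j - m), y_i = exp(x_i - m) / s (or x_i - m - log(s)
// for log-softmax), with each pass reduced block-wide via blockReduce and fed by
// vectorized loads whenever alignment allows.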
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>;
using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>;
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
const int shift = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t);
const int grad_output_shift = ((uint64_t)gradOutput) % ALIGN_BYTES / sizeof(outscalar_t);
accscalar_t threadSum = ilpReduce<AddFloat, ILP, outscalar_t, accscalar_t>(
grad_output_shift, gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
accscalar_t sum_k = blockReduce<Add, accscalar_t>(
sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
if (shift == output_shift && shift == grad_output_shift) {
WriteBpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, gradInput, output, gradOutput, epilogue);
} else {
WriteBpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, gradInput, output, gradOutput, epilogue);
}
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){
if (half_to_float) {
TORCH_CHECK(input_.scalar_type() == ScalarType::Half, "conversion is supported for Half type only");
}
auto input = input_.contiguous();
Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (input.dim() == 0) input = input.view(1);
int64_t dim = maybe_wrap_dim(dim_, input.dim());
TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
if (input.numel() > 0) {
int64_t inner_size = 1;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns one block for each element in the batch.
// XXX: it assumes that inner_size == 1
if (inner_size == 1) {
dim3 grid(outer_size);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto output_ptr = output.data_ptr<scalar_t>();
auto input_ptr = input.data_ptr<scalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
output_ptr, input_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
input_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(scalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto output_ptr = output.data_ptr<accscalar_t>();
auto input_ptr = input.data_ptr<scalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>(
output_ptr, input_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
input_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(accscalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
});
// This kernel runs in a 2D grid: blockIdx.x walks over outer_size and blockIdx.y
// (together with threadIdx.y) covers inner_size, while the threads that share a
// threadIdx.y cooperate on the reduction over dim (that reduction is single-threaded
// only when blockDim.x == 1).
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
}
return output;
}
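// Dispatch sketch for the function above: with the input flattened to
// outer x dim x inner, (a) inner_size == 1 and dim_size <= 1024 with rows of at most
// 4096 bytes goes to the warp-level persistent softmax (dispatch_softmax_forward),
// chunked over the batch; (b) inner_size == 1 with larger dim_size uses
// cunn_SoftMaxForward, one block per row; (c) everything else uses the spatial kernel
// cunn_SpatialSoftMaxForward.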
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){
int64_t dim = maybe_wrap_dim(dim_, grad_.dim());
Tensor gI = half_to_float ? at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (grad_.numel() == 0) {
return gI;
}
auto grad = grad_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (grad.dim() == 0) grad = grad.view(1);
TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
auto output = output_.contiguous();
if (output.dim() == 0) output = output.view(1);
int64_t outer_size = 1;
int64_t dim_size = output.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output.size(i);
for (int64_t i = dim + 1; i < output.dim(); ++i)
inner_size *= output.size(i);
// See descriptions of kernels above.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (inner_size == 1) {
dim3 grid(outer_size);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto gI_ptr = gI.data_ptr<scalar_t>();
auto grad_ptr = grad.data_ptr<scalar_t>();
auto output_ptr = output.data_ptr<scalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI_ptr, grad_ptr, output_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
gI_ptr += chunk_size * dim_size;
grad_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(scalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto gI_ptr = gI.data_ptr<scalar_t>();
auto grad_ptr = grad.data_ptr<accscalar_t>();
auto output_ptr = output.data_ptr<accscalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI_ptr, grad_ptr, output_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
gI_ptr += chunk_size * dim_size;
grad_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(accscalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
});
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(),
outer_size, dim_size, inner_size
);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(),
outer_size, dim_size, inner_size
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
return gI;
}
}
Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float);
}
Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
TORCH_CHECK((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half),
"expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float);
}
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float);
}
Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
TORCH_CHECK((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half),
"expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
Tensor tmp = grad * output;
return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float);
}
}
}
| c041923e11247833b9eb7cde604e69c52e79bf74.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <c10/macros/Macros.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <type_traits>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
namespace at {
namespace native {
namespace {
constexpr int ALIGN_BYTES = 16;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input), logsum(std::log(sum)) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - max_input - logsum);
}
const AccumT max_input;
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
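// In other words: the forward pass stores out_i = x_i - max - log(sum_j exp(x_j - max)),
// and the backward pass computes grad_in_i = grad_out_i - exp(out_i) * sum_j grad_out_j,
// which is exactly what the two epilogues above implement.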
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
__device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input)
, sum(sum) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(std::exp(input - max_input) / sum);
}
const AccumT max_input;
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
__device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
// XXX: gradOutput that we get here is really gradOutput * output
// Look for cmul in SoftMax_updateGradInput
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - output * sum);
}
const AccumT sum;
};
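// Derivation sketch: for y = softmax(x), dL/dx_i = y_i * (dy_i - sum_j dy_j * y_j).
// softmax_backward_cuda at the bottom of this file passes tmp = dy * y as gradOutput,
// so the reduced sum equals sum_j dy_j * y_j and the epilogue returns
// tmp_i - y_i * sum, i.e. the expression above.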
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimension:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize inner dimension over y axis and outer over x.
inline dim3 SpatialSoftMax_getGridSize(
dim3 block, uint32_t max_active_blocks,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
// First, tile as many blocks as we can over the y axis
uint32_t inner_blocks = (inner_size + block.y - 1) / block.y;
if (inner_blocks > max_active_blocks)
inner_blocks = max_active_blocks;
// Fill the x axis with as many blocks as we can fit (a little more is ok too)
uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks;
if (outer_blocks > outer_size)
outer_blocks = outer_size;
return dim3(outer_blocks, inner_blocks);
}
const int max_threads = 1024;
inline dim3 SpatialSoftMax_getBlockSize(
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
uint32_t inner_threads = inner_size;
inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads));
uint32_t dim_threads = 1;
if (inner_threads <= 64 && dim_size >= 64) {
while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
dim_threads *= 2;
dim_threads /= 2;
}
return dim3(dim_threads, inner_threads);
}
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
Kernel k,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
dim3& grid, dim3& block, uint32_t& smem_size) {
block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
uint32_t block_threads = block.x * block.y;
smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
int max_active_blocks;
#if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION < 305
// HIP function signature is not compatible yet.
uint32_t max_blocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks,
k, block_threads, smem_size);
max_active_blocks = max_blocks;
#else
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
k, block_threads, smem_size);
#endif
max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
// In the vectorized case we want to trade off allowing more of the buffers to be accessed
// in a vectorized way against wanting a larger block size to get better utilisation.
// In general with ILP you can have (ILP-1)/ILP of the buffer accessed vectorised, at the risk
// of having a very small block size. We choose to keep >= 1/2 of the buffer vectorised while
// allowing a larger block size.
if (ILP > 1) {
max_block_size /= 2;
}
while (block_size < (max_block_size)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = std::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
ReduceOp<T> r;
shared += threadIdx.y * blockDim.x;
__syncthreads();
shared[threadIdx.x] = val;
// NOTE: loop starts with __syncthreads()
int offset = blockDim.x / 2;
while (offset > 0) {
__syncthreads();
if (threadIdx.x < offset)
shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
offset /= 2;
}
__syncthreads();
return shared[0];
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward(
outscalar_t *output, scalar_t *input,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
////////////////////////////////////////////////////////////
// These two blocks are really equivalent, but specializing on
// blockDim.x == 1 makes the kernel faster when the x dimension is unused.
// I didn't want to thread an extra template parameter, and nvcc
// seems to be smart enough to hoist the if outside of the loops.
////////////////////////////////////////////////////////////
if (blockDim.x > 1) {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
} else {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
}
}
}
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward(
scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
// See the comment in forward kernel
if (blockDim.x > 1) {
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += gradOutput[data_offset + d * dim_stride];
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
} else {
accscalar_t sum = 0;
for (uint32_t d = 0; d < dim_size; d++)
sum += gradOutput[data_offset + d * dim_stride];
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = 0; d < dim_size; d++) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + std::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1;
if (threadIdx.x < C10_WARP_SIZE) {
int lane = threadIdx.x % C10_WARP_SIZE;
if (lane < blockDim.x / C10_WARP_SIZE) {
#pragma unroll
for (int i = 0; i < C10_WARP_SIZE; ++i) {
warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]);
}
#ifndef __HIP_PLATFORM_HCC__
__syncwarp(mask);
#endif
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
using LoadT = at::native::memory::aligned_vector<T, ILP>;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
// Handle the unaligned head first, one element per thread, so that the
// vectorized loop below starts at an ALIGN_BYTES boundary.
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Tail: reduce the remaining (non-vectorizable) elements one per thread
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
/**
* This will apply the Epilogue with vectorized reads & writes when input & output have the same shift
*/
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteFpropResultsVectorized(
int size,
const int shift,
scalar_t *input,
outscalar_t *output,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>;
using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>;
int offset = threadIdx.x;
// if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later
if (shift > 0) {
input -= shift;
output -= shift;
size += shift;
if (threadIdx.x >= shift) {
output[offset] = epilogue(input[offset]);
}
size -= blockDim.x;
input += blockDim.x;
output += blockDim.x;
}
const int last = size % (ILP * blockDim.x);
scalar_t in_v[ILP];
LoadT* in_value = reinterpret_cast<LoadT*>(&in_v);
outscalar_t out_v[ILP];
StoreT* out_value = reinterpret_cast<StoreT*>(&out_v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*in_value = reinterpret_cast<LoadT*>(input)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
out_v[j] = epilogue(in_v[j]);
}
reinterpret_cast<StoreT*>(output)[offset] = *out_value;
}
offset = size - last + threadIdx.x;
// handle the tail
for (; offset < size; offset += blockDim.x) {
output[offset] = epilogue(input[offset]);
}
}
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteBpropResultsVectorized(
int size,
const int shift,
scalar_t *gradInput,
outscalar_t *output,
outscalar_t *gradOutput,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
using gradInputT = at::native::memory::aligned_vector<scalar_t, ILP>;
using outputT = at::native::memory::aligned_vector<outscalar_t, ILP>;
int offset = threadIdx.x;
// if unaligned, do one value / thread and move on, guaranteeing aligned reads/writes later
if (shift > 0) {
gradInput -= shift;
output -= shift;
gradOutput -= shift;
size += shift;
if (threadIdx.x >= shift) {
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
size -= blockDim.x;
gradInput += blockDim.x;
output += blockDim.x;
gradOutput += blockDim.x;
}
const int last = size % (ILP * blockDim.x);
scalar_t dX[ILP];
gradInputT *dX_v = reinterpret_cast<gradInputT*>(&dX);
outscalar_t Y[ILP];
outputT *Y_v = reinterpret_cast<outputT*>(&Y);
outscalar_t dY[ILP];
outputT *dY_v = reinterpret_cast<outputT*>(&dY);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*Y_v = reinterpret_cast<outputT*>(output)[offset];
*dY_v = reinterpret_cast<outputT*>(gradOutput)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
dX[j] = epilogue(dY[j], Y[j]);
}
reinterpret_cast<gradInputT*>(gradInput)[offset] = *dX_v;
}
offset = size - last + threadIdx.x;
for (; offset < size; offset += blockDim.x) {
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
}
/**
* This will apply the Epilogue with non-vectrorized reads & writes for the general case
*/
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteFpropResults(
int classes,
scalar_t *input,
outscalar_t *output,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
// Main bulk of loop with ILP
for (; offset < classes - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmp[j] = input[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j) {
output[offset + j * blockDim.x] = epilogue(tmp[j]);
}
}
// Remainder - no ILP
for (; offset < classes; offset += blockDim.x) {
output[offset] = epilogue(input[offset]);
}
}
template <int ILP, typename scalar_t, typename accum_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__device__ __forceinline__ void
WriteBpropResults(
int classes,
scalar_t *gradInput,
outscalar_t *output,
outscalar_t *gradOutput,
Epilogue<scalar_t, accum_t, outscalar_t> epilogue) {
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
outscalar_t tmpOutput[ILP];
outscalar_t tmpGradOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpOutput[j] = output[offset + j * blockDim.x];
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j) {
gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
}
}
// Remainder - no ILP
for (; offset < classes; offset += blockDim.x) {
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>;
using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>;
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t);
// find the max
accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
accscalar_t max_k = blockReduce<Max, accscalar_t>(
sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
// reduce all values
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
if (shift == output_shift) {
WriteFpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, input, output, epilogue);
} else {
WriteFpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, input, output, epilogue);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
using LoadT = at::native::memory::aligned_vector<scalar_t, ILP>;
using StoreT = at::native::memory::aligned_vector<outscalar_t, ILP>;
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
const int shift = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
const int output_shift = ((uint64_t)output) % ALIGN_BYTES / sizeof(outscalar_t);
const int grad_output_shift = ((uint64_t)gradOutput) % ALIGN_BYTES / sizeof(outscalar_t);
accscalar_t threadSum = ilpReduce<AddFloat, ILP, outscalar_t, accscalar_t>(
grad_output_shift, gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
accscalar_t sum_k = blockReduce<Add, accscalar_t>(
sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
if (shift == output_shift && shift == grad_output_shift) {
WriteBpropResultsVectorized<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, shift, gradInput, output, gradOutput, epilogue);
} else {
WriteBpropResults<ILP, scalar_t, accscalar_t, outscalar_t, Epilogue>(classes, gradInput, output, gradOutput, epilogue);
}
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){
if (half_to_float) {
TORCH_CHECK(input_.scalar_type() == ScalarType::Half, "conversion is supported for Half type only");
}
auto input = input_.contiguous();
Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (input.dim() == 0) input = input.view(1);
int64_t dim = maybe_wrap_dim(dim_, input.dim());
TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
if (input.numel() > 0) {
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns one block for each element in the batch.
// XXX: it assumes that inner_size == 1
if (inner_size == 1) {
dim3 grid(outer_size);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto output_ptr = output.data_ptr<scalar_t>();
auto input_ptr = input.data_ptr<scalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
output_ptr, input_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
input_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(scalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto output_ptr = output.data_ptr<accscalar_t>();
auto input_ptr = input.data_ptr<scalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>(
output_ptr, input_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
input_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(accscalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
});
// This kernel runs in a 2D grid: blockIdx.x walks over outer_size and blockIdx.y
// (together with threadIdx.y) covers inner_size, while the threads that share a
// threadIdx.y cooperate on the reduction over dim (that reduction is single-threaded
// only when blockDim.x == 1).
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
}
return output;
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){
int64_t dim = maybe_wrap_dim(dim_, grad_.dim());
Tensor gI = half_to_float ? at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (grad_.numel() == 0) {
return gI;
}
auto grad = grad_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (grad.dim() == 0) grad = grad.view(1);
TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
auto output = output_.contiguous();
if (output.dim() == 0) output = output.view(1);
int64_t outer_size = 1;
int64_t dim_size = output.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output.size(i);
for (int64_t i = dim + 1; i < output.dim(); ++i)
inner_size *= output.size(i);
// See descriptions of kernels above.
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (inner_size == 1) {
dim3 grid(outer_size);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto gI_ptr = gI.data_ptr<scalar_t>();
auto grad_ptr = grad.data_ptr<scalar_t>();
auto output_ptr = output.data_ptr<scalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI_ptr, grad_ptr, output_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
gI_ptr += chunk_size * dim_size;
grad_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(scalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
auto gI_ptr = gI.data_ptr<scalar_t>();
auto grad_ptr = grad.data_ptr<accscalar_t>();
auto output_ptr = output.data_ptr<accscalar_t>();
int64_t remaining = outer_size;
int64_t chunk_size = ((1LL << 31) - 1) / dim_size;
while(remaining > 0) {
dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI_ptr, grad_ptr, output_ptr, dim_size, dim_size, std::min<int64_t>(remaining, chunk_size));
gI_ptr += chunk_size * dim_size;
grad_ptr += chunk_size * dim_size;
output_ptr += chunk_size * dim_size;
remaining -= chunk_size;
}
} else {
constexpr int ILP = sizeof(float4) / sizeof(accscalar_t);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
});
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(),
outer_size, dim_size, inner_size
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(),
outer_size, dim_size, inner_size
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
return gI;
}
}
Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float);
}
Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
TORCH_CHECK((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half),
"expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float);
}
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float);
}
Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
TORCH_CHECK((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half),
"expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
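// softmax backward is dX = output * (grad - sum(grad * output)); premultiplying grad by
// output up front lets the backward kernels take their row sum directly over the tensor
// they are handed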
Tensor tmp = grad * output;
return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float);
}
}
}
|
f4bf6db175e73eb4ba9fdf97f63c25ac38ed6a5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/hip/im2col.cuh>
#include <ATen/native/hip/vol2col.cuh>
#include <ATen/native/DilatedConvolutionUtils.h>
#include <c10/util/accumulate.h>
#include <tuple>
namespace at {
namespace native {
namespace {
// hyper-volume to column, CUDA
template <typename Dtype, int64_t dim>
void hvol2col(
hipStream_t stream,
const Dtype* data_hvol,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_col) {
if (dim == 3) {
vol2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_col);
}
if (dim == 2) {
im2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_col);
}
}
// column to hyper-volume, CUDA
template <typename Dtype, int64_t dim>
void col2hvol(
hipStream_t stream,
const Dtype* data_col,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_hvol) {
if (dim == 3) {
col2vol<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_hvol);
}
if (dim == 2) {
col2im<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_hvol);
}
}
/*
check tensor data locations
*/
void slow_conv_dilated_location_check(
CheckedFrom c,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output) {
// checking data locations of user-provided tensor arguments
TensorArg input_arg{input, "input", 2}, weight_arg{weight, "weight", 3},
bias_arg{bias, "bias", 4}, grad_output_arg{grad_output, "grad_output", 5};
checkAllSameGPU(c, {input_arg, weight_arg});
if (bias.defined()) {
checkAllSameGPU(c, {input_arg, bias_arg});
}
if (grad_output.defined()) {
checkAllSameGPU(c, {input_arg, grad_output_arg});
}
// we are not checking the data locations of other tensor
// arguments such as output, grad_input, etc because of these are
// allocated based on input options and hence these tensors always
// have the same data location as of input tensor.
}
/*
slow_conv_dilated_all_cuda_template
Main worker. Computes tensors output, grad_input, grad_weight,
and/or grad_bias if defined, respectively.
*/
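// Strategy: for each batch element, lower the input with hvol2col into the `columns`
// scratch buffer and express the convolution and its gradients as BLAS gemm calls
// (plus a gemv or sum for grad_bias); col2hvol scatters column-space gradients back
// into the input layout.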
template <int64_t dim>
void slow_conv_dilated_all_cuda_template(
Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
slow_conv_dilated_location_check(__func__, input, weight, bias, grad_output);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto options = input.options();
// The rear part of input tensor sizes:
auto input_size = input.sizes().slice(2);
// The rear part of output tensor sizes:
auto output_size = internal::get_output_size<dim>(
input, kernel_size, stride_size, pad_size, dilation_size);
int64_t batchSize = input.size(0);
int64_t nInputPlane = weight.size(1);
int64_t nOutputPlane = weight.size(0);
// Temporary buffers:
const int64_t m = c10::multiply_integers(kernel_size);
const int64_t output_vsize = c10::multiply_integers(output_size);
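// `columns` is the vol2col/im2col scratch buffer, shaped
// (nInputPlane * prod(kernel_size)) x prod(output_size)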
Tensor columns = at::empty({0}, options);
if (output.defined() || grad_weight.defined() || grad_input.defined()) {
columns.resize_({nInputPlane * m, output_vsize});
}
// Initialize
if (grad_weight.defined()) {
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.zero_();
}
if (output.defined() && !bias.defined()) {
output.zero_();
}
#ifdef __HIP_PLATFORM_HCC__
/* When using ROCm, the sum evaluation is inaccurate for double
tensors. The reason is currently unknown. Hence, we use gemv for
computing `grad_output_n.sum(dims)` until the ROCm-sum issue is
resolved. */
Tensor ones = at::empty({0}, options);
if (grad_bias.defined()) {
ones.resize_({output_vsize});
ones.fill_(1);
}
/* MSVC does not like #ifdef-s inside the CPP macro
AT_DISPATCH_FLOATING_TYPES_AND_HALF. So, we define the code
branching outside the CPP macro: */
#define CALCULATE_GRAD_BIAS \
at::cuda::blas::gemv<scalar_t>( \
/*trans=*/'t', \
/* m=*/output_vsize, \
/* n=*/nOutputPlane, \
/*alpha=*/static_cast<scalar_t>(1), \
/* A=*/grad_output_n.data_ptr<scalar_t>(), \
/* lda=*/output_vsize, \
/* x=*/ones.data_ptr<scalar_t>(), \
/* incx=*/1, \
/* beta=*/static_cast<scalar_t>(1), \
/* y=*/grad_bias.data_ptr<scalar_t>(), \
/* incy=*/1)
#else
#define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims)
#endif
// Helpers
Tensor grad_output_n;
std::vector<int64_t> dims(dim);
std::iota(dims.begin(), dims.end(), 1);
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_dilated<>", [&] {
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt++) {
// Matrix multiply per output:
Tensor input_n = input.select(0, elt);
// Output
if (output.defined()) {
Tensor output_n = output.select(0, elt);
if (bias.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
for (int n = 0; n < nOutputPlane; n++) {
output_n.select(0, n).fill_(bias[n]);
}
}
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'n',
/* m=*/columns.size(1),
/* n=*/nOutputPlane,
/* k=*/columns.size(0),
/* alpha=*/static_cast<scalar_t>(1),
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/static_cast<scalar_t>(1),
/* C=*/output_n.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
} else {
// All gradients
grad_output_n = grad_output.select(0, elt);
}
// Gradient of input:
if (grad_input.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'t',
/* m=*/columns.size(1),
/* n=*/columns.size(0),
/* k=*/nOutputPlane,
/* alpha=*/static_cast<scalar_t>(1),
/* A=*/grad_output_n.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/static_cast<scalar_t>(0),
/* C=*/columns.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
// Unpack columns back into input:
Tensor grad_input_n = grad_input.select(0, elt);
col2hvol<scalar_t, dim>(
stream,
columns.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
grad_input_n.data_ptr<scalar_t>());
}
// Gradient of weight:
if (grad_weight.defined()) {
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
scalar_t scale = static_cast<scalar_t>(
1); // TODO: expose as argument?
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'t',
/*transb=*/'n',
/* m=*/columns.size(0),
/* n=*/nOutputPlane,
/* k=*/columns.size(1),
/* alpha=*/scale,
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/grad_output_n.data_ptr<scalar_t>(),
/* ldb=*/columns.size(1),
/* beta=*/static_cast<scalar_t>(1),
/* C=*/grad_weight.data_ptr<scalar_t>(),
/* ldc=*/columns.size(0));
}
// Gradient of bias:
if (grad_bias.defined()) {
/* For gemv argument derivation, see
slow_conv_dilated_all_cpu_template in
ATen/native/DilatedConvolution.cpp */
CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s
inside the CPP macros, see above. */
/*
TODO: when scale != 1 is introduced then use:
grad_bias += scale * grad_output_n.sum(dims);
*/
}
}
});
} // slow_conv_dilated_all_cuda_template
} // namespace
Tensor slow_conv_dilated2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<2>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<2>(
output_,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<2>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
Tensor slow_conv_dilated3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<3>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<3>(
output_,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<3>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
| f4bf6db175e73eb4ba9fdf97f63c25ac38ed6a5e.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/cuda/im2col.cuh>
#include <ATen/native/cuda/vol2col.cuh>
#include <ATen/native/DilatedConvolutionUtils.h>
#include <c10/util/accumulate.h>
#include <tuple>
namespace at {
namespace native {
namespace {
// hyper-volume to column, CUDA
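// dim == 3 lowers a volume via vol2col, dim == 2 lowers an image via im2col; either way
// the column buffer has one row per (input channel, kernel offset) pair and one column
// per output location.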
template <typename Dtype, int64_t dim>
void hvol2col(
cudaStream_t stream,
const Dtype* data_hvol,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_col) {
if (dim == 3) {
vol2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_col);
}
if (dim == 2) {
im2col<Dtype>(
stream,
data_hvol,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_col);
}
}
// column to hyper-volume, CUDA
template <typename Dtype, int64_t dim>
void col2hvol(
cudaStream_t stream,
const Dtype* data_col,
const int channels,
const IntArrayRef input_size,
const IntArrayRef output_size,
const IntArrayRef kernel_size,
const IntArrayRef stride_size,
const IntArrayRef pad_size,
const IntArrayRef dilation_size,
Dtype* data_hvol) {
if (dim == 3) {
col2vol<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
input_size[2],
output_size[0],
output_size[1],
output_size[2],
kernel_size[0],
kernel_size[1],
kernel_size[2],
pad_size[0],
pad_size[1],
pad_size[2],
stride_size[0],
stride_size[1],
stride_size[2],
dilation_size[0],
dilation_size[1],
dilation_size[2],
data_hvol);
}
if (dim == 2) {
col2im<Dtype, Dtype>(
stream,
data_col,
channels,
input_size[0],
input_size[1],
output_size[0],
output_size[1],
kernel_size[0],
kernel_size[1],
pad_size[0],
pad_size[1],
stride_size[0],
stride_size[1],
dilation_size[0],
dilation_size[1],
data_hvol);
}
}
/*
check tensor data locations
*/
void slow_conv_dilated_location_check(
CheckedFrom c,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output) {
// checking data locations of user-provided tensor arguments
TensorArg input_arg{input, "input", 2}, weight_arg{weight, "weight", 3},
bias_arg{bias, "bias", 4}, grad_output_arg{grad_output, "grad_output", 5};
checkAllSameGPU(c, {input_arg, weight_arg});
if (bias.defined()) {
checkAllSameGPU(c, {input_arg, bias_arg});
}
if (grad_output.defined()) {
checkAllSameGPU(c, {input_arg, grad_output_arg});
}
// we are not checking the data locations of other tensor
// arguments such as output, grad_input, etc because of these are
// allocated based on input options and hence these tensors always
// have the same data location as of input tensor.
}
/*
slow_conv_dilated_all_cuda_template
Main worker. Computes tensors output, grad_input, grad_weight,
and/or grad_bias if defined, respectively.
*/
template <int64_t dim>
void slow_conv_dilated_all_cuda_template(
Tensor& output,
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
slow_conv_dilated_location_check(__func__, input, weight, bias, grad_output);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto options = input.options();
// The rear part of input tensor sizes:
auto input_size = input.sizes().slice(2);
// The rear part of output tensor sizes:
auto output_size = internal::get_output_size<dim>(
input, kernel_size, stride_size, pad_size, dilation_size);
int64_t batchSize = input.size(0);
int64_t nInputPlane = weight.size(1);
int64_t nOutputPlane = weight.size(0);
// Temporary buffers:
const int64_t m = c10::multiply_integers(kernel_size);
const int64_t output_vsize = c10::multiply_integers(output_size);
Tensor columns = at::empty({0}, options);
if (output.defined() || grad_weight.defined() || grad_input.defined()) {
columns.resize_({nInputPlane * m, output_vsize});
}
// Initialize
if (grad_weight.defined()) {
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.zero_();
}
if (output.defined() && !bias.defined()) {
output.zero_();
}
#ifdef __HIP_PLATFORM_HCC__
/* When using ROCm, the sum evaluation is inaccurate for double
tensors. The reason is currently unknown. Hence, we use gemv for
computing `grad_output_n.sum(dims)` until the ROCm-sum issue is
resolved. */
Tensor ones = at::empty({0}, options);
if (grad_bias.defined()) {
ones.resize_({output_vsize});
ones.fill_(1);
}
/* MSVC does not like #ifdef-s inside the CPP macro
AT_DISPATCH_FLOATING_TYPES_AND_HALF. So, we define the code
branching outside the CPP macro: */
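// In column-major BLAS terms grad_output_n is an output_vsize x nOutputPlane matrix whose
// n-th column is output plane n, so gemv('t', ...) with a ones vector accumulates the
// per-plane sums into grad_bias (beta = 1); this is the same result as
// grad_output_n.sum(dims).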
#define CALCULATE_GRAD_BIAS \
at::cuda::blas::gemv<scalar_t>( \
/*trans=*/'t', \
/* m=*/output_vsize, \
/* n=*/nOutputPlane, \
/*alpha=*/static_cast<scalar_t>(1), \
/* A=*/grad_output_n.data_ptr<scalar_t>(), \
/* lda=*/output_vsize, \
/* x=*/ones.data_ptr<scalar_t>(), \
/* incx=*/1, \
/* beta=*/static_cast<scalar_t>(1), \
/* y=*/grad_bias.data_ptr<scalar_t>(), \
/* incy=*/1)
#else
#define CALCULATE_GRAD_BIAS grad_bias += grad_output_n.sum(dims)
#endif
// Helpers
Tensor grad_output_n;
std::vector<int64_t> dims(dim);
std::iota(dims.begin(), dims.end(), 1);
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_dilated<>", [&] {
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt++) {
// Matrix multiply per output:
Tensor input_n = input.select(0, elt);
// Output
if (output.defined()) {
Tensor output_n = output.select(0, elt);
if (bias.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
for (int n = 0; n < nOutputPlane; n++) {
output_n.select(0, n).fill_(bias[n]);
}
}
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'n',
/* m=*/columns.size(1),
/* n=*/nOutputPlane,
/* k=*/columns.size(0),
/* alpha=*/static_cast<scalar_t>(1),
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/static_cast<scalar_t>(1),
/* C=*/output_n.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
} else {
// All gradients
grad_output_n = grad_output.select(0, elt);
}
// Gradient of input:
if (grad_input.defined()) {
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'n',
/*transb=*/'t',
/* m=*/columns.size(1),
/* n=*/columns.size(0),
/* k=*/nOutputPlane,
/* alpha=*/static_cast<scalar_t>(1),
/* A=*/grad_output_n.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/weight.data_ptr<scalar_t>(),
/* ldb=*/columns.size(0),
/* beta=*/static_cast<scalar_t>(0),
/* C=*/columns.data_ptr<scalar_t>(),
/* ldc=*/columns.size(1));
// Unpack columns back into input:
Tensor grad_input_n = grad_input.select(0, elt);
col2hvol<scalar_t, dim>(
stream,
columns.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
grad_input_n.data_ptr<scalar_t>());
}
// Gradient of weight:
if (grad_weight.defined()) {
// Extract columns:
hvol2col<scalar_t, dim>(
stream,
input_n.data_ptr<scalar_t>(),
nInputPlane,
input_size,
output_size,
kernel_size,
stride_size,
pad_size,
dilation_size,
columns.data_ptr<scalar_t>());
scalar_t scale = static_cast<scalar_t>(
1); // TODO: expose as argument?
/* For gemm argument derivation, see
slow_conv_dilated_all_cuda_template in
ATen/native/DilatedConvolution.cpp */
at::cuda::blas::gemm<scalar_t>(
/*transa=*/'t',
/*transb=*/'n',
/* m=*/columns.size(0),
/* n=*/nOutputPlane,
/* k=*/columns.size(1),
/* alpha=*/scale,
/* A=*/columns.data_ptr<scalar_t>(),
/* lda=*/columns.size(1),
/* B=*/grad_output_n.data_ptr<scalar_t>(),
/* ldb=*/columns.size(1),
/* beta=*/static_cast<scalar_t>(1),
/* C=*/grad_weight.data_ptr<scalar_t>(),
/* ldc=*/columns.size(0));
}
// Gradient of bias:
if (grad_bias.defined()) {
/* For gemv argument derivation, see
slow_conv_dilated_all_cpu_template in
ATen/native/DilatedConvolution.cpp */
CALCULATE_GRAD_BIAS; /* MSVC does not like #ifdef-s
inside the CPP macros, see above. */
/*
TODO: when scale != 1 is introduced then use:
grad_bias += scale * grad_output_n.sum(dims);
*/
}
}
});
} // slow_conv_dilated_all_cuda_template
} // namespace
Tensor slow_conv_dilated2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<2>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<2>(
output_,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<2>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 4;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<2>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
Tensor slow_conv_dilated3d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt);
const Tensor& bias = *bias_maybe_owned;
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
bias,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = input.options();
// calculate output tensor size
auto output_size = internal::get_output_size<3>(
input, weight, kernel_size, stride_size, pad_size, dilation_size);
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined);
Tensor output = at::empty(output_size, options);
Tensor output_ = (is_batch ? output : output.unsqueeze(0));
slow_conv_dilated_all_cuda_template<3>(
output_,
input_,
weight_,
bias_,
undefined,
undefined,
undefined,
undefined,
kernel_size,
stride_size,
pad_size,
dilation_size);
return output;
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_dilated3d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size,
const std::array<bool, 3ul> output_mask) {
Tensor undefined;
internal::slow_conv_dilated_shape_check<3>(
input,
weight,
undefined,
grad_output,
kernel_size,
stride_size,
pad_size,
dilation_size);
auto is_batch = input.dim() == 5;
auto options = grad_output.options();
// template function assumes batched tensors. unsqueeze(0) will
// insert batch dimension without affecting the original tensor.
const Tensor grad_output_ =
(is_batch ? grad_output.contiguous()
: grad_output.contiguous().unsqueeze(0));
const Tensor input_ =
(is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));
const Tensor weight_ = weight.contiguous();
// compute only gradients for which the corresponding output_mask is true:
Tensor grad_input =
(output_mask[0] ? at::empty(input.sizes(), options) : undefined);
Tensor grad_weight =
(output_mask[1] ? at::empty(weight.sizes(), options) : undefined);
Tensor grad_bias =
(output_mask[2] ? at::empty(weight.size(0), options) : undefined);
Tensor grad_input_ =
(output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))
: undefined);
slow_conv_dilated_all_cuda_template<3>(
undefined,
input_,
weight_,
undefined,
grad_output_,
grad_input,
grad_weight,
grad_bias,
kernel_size,
stride_size,
pad_size,
dilation_size);
return std::tie(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
51acc885f298324e69c58b956f39fbd472c3aa10.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| 51acc885f298324e69c58b956f39fbd472c3aa10.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
2>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
26b704ef8f21cf1718bd4efa01c5914a186af371.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <float.h>
#include <stdint.h>
#include <stdio.h>
#include <limits>
#include "BufferCompaction.h"
#include "ExtensionFunctions.hpp"
#include "GpuRtConstants.h"
#include "HyperLogLogRank.h"
#include "TableFunctions/TableFunctions.hpp"
extern "C" __device__ int64_t get_thread_index() {
return threadIdx.x;
}
extern "C" __device__ int64_t get_block_index() {
return blockIdx.x;
}
extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) {
return blockIdx.x * blockDim.x + threadIdx.x;
}
extern "C" __device__ int32_t group_buff_idx_impl() {
return pos_start_impl(NULL);
}
extern "C" __device__ int32_t pos_step_impl() {
return blockDim.x * gridDim.x;
}
extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) {
return threadIdx.x % warp_sz;
}
extern "C" __device__ const int64_t* init_shared_mem_nop(
const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
return groups_buffer;
}
extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) {
}
/*
* Just declares and returns a dynamic shared memory pointer. Total size should be
* properly set during kernel launch
*/
extern "C" __device__ int64_t* declare_dynamic_shared_memory() {
extern __shared__ int64_t shared_mem_buffer[];
return shared_mem_buffer;
}
/**
* Initializes the shared memory buffer for perfect hash group by.
* In this function, we simply copy the global group by buffer (already initialized on the
* host and transferred) to all shared memory group by buffers.
*/
extern "C" __device__ const int64_t* init_shared_mem(const int64_t* global_groups_buffer,
const int32_t groups_buffer_size) {
// dynamic shared memory declaration
extern __shared__ int64_t shared_groups_buffer[];
// it is assumed that buffer size is aligned with 64-bit units
// so it is safe to assign 64-bit to each thread
const int32_t buffer_units = groups_buffer_size >> 3;
for (int32_t pos = threadIdx.x; pos < buffer_units; pos += blockDim.x) {
shared_groups_buffer[pos] = global_groups_buffer[pos];
}
__syncthreads();
return shared_groups_buffer;
}
#define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu
#include "GpuInitGroups.cu"
#undef init_group_by_buffer_gpu_impl
// Dynamic watchdog: monitoring up to 64 SMs. E.g. GP100 config may have 60:
// 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors
// TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA
__device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel
// TODO(Saman): make this cycle budget something constant in codegen level
__device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel
__device__ int32_t dw_abort = 0; // TBD: set from host (async)
__device__ int32_t runtime_interrupt_flag = 0;
__inline__ __device__ uint32_t get_smid(void) {
uint32_t ret;
asm("mov.u32 %0, %%smid;" : "=r"(ret));
return ret;
}
/*
* The main objective of this function is to return true, if any of the following two
* scenarios happen:
* 1. receives a host request for aborting the kernel execution
* 2. kernel execution takes longer clock cycles than it was initially allowed
* The assumption is that all (or none) threads within a block return true for the
* watchdog, and the first thread within each block compares the recorded clock cycles for
* its occupying SM with the allowed budget. It also assumes that all threads entering
* this function are active (no critical edge exposure)
* NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global
* memory scope.
*/
extern "C" __device__ bool dynamic_watchdog() {
// check for dynamic watchdog, if triggered all threads return true
if (dw_cycle_budget == 0LL) {
return false; // Uninitialized watchdog can't check time
}
if (dw_abort == 1) {
return true; // Received host request to abort
}
uint32_t smid = get_smid();
if (smid >= 128) {
return false;
}
__shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start
__shared__ volatile bool
dw_should_terminate; // all threads within a block should return together if
// watchdog criteria is met
// thread 0 either initializes or read the initial clock cycle, the result is stored
// into shared memory. Since all threads wihtin a block shares the same SM, there's no
// point in using more threads here.
if (threadIdx.x == 0) {
dw_block_cycle_start = 0LL;
int64_t cycle_count = static_cast<int64_t>(clock64());
// Make sure the block hasn't switched SMs
if (smid == get_smid()) {
dw_block_cycle_start = static_cast<int64_t>(
atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
0ULL,
static_cast<unsigned long long>(cycle_count)));
}
int64_t cycles = cycle_count - dw_block_cycle_start;
if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
(cycles > dw_cycle_budget)) {
// Check if we're out of time on this particular SM
dw_should_terminate = true;
} else {
dw_should_terminate = false;
}
}
__syncthreads();
return dw_should_terminate;
}
extern "C" __device__ bool check_interrupt() {
return (runtime_interrupt_flag == 1) ? true : false;
}
template <typename T = unsigned long long>
inline __device__ T get_empty_key() {
return EMPTY_KEY_64;
}
template <>
inline __device__ unsigned int get_empty_key() {
return EMPTY_KEY_32;
}
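// Row-wise group-by slot lookup: the first key column is claimed with atomicCAS on the
// empty sentinel; the winning thread writes the remaining key columns, other threads spin
// until the last key column is visible, then every thread compares the stored key with its
// own. Returns the 64-bit-aligned payload pointer on a match, or NULL if the slot holds a
// different key.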
template <typename T>
inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const T* key,
const uint32_t key_count,
const uint32_t row_size_quad) {
const T empty_key = get_empty_key<T>();
uint32_t off = h * row_size_quad;
auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
{
const T old = atomicCAS(row_ptr, empty_key, *key);
if (empty_key == old && key_count > 1) {
for (size_t i = 1; i <= key_count - 1; ++i) {
atomicExch(row_ptr + i, key[i]);
}
}
}
if (key_count > 1) {
while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) {
// spin until the winning thread has finished writing the entire key and the init
// value
}
}
bool match = true;
for (uint32_t i = 0; i < key_count; ++i) {
if (row_ptr[i] != key[i]) {
match = false;
break;
}
}
if (match) {
auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
}
return NULL;
}
extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width,
const uint32_t row_size_quad,
const int64_t* init_vals) {
switch (key_width) {
case 4:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned int*>(key),
key_count,
row_size_quad);
case 8:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count,
row_size_quad);
default:
return NULL;
}
}
template <typename T>
__device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const T* key,
const uint32_t key_count) {
const T empty_key = get_empty_key<T>();
const uint64_t old =
atomicCAS(reinterpret_cast<T*>(groups_buffer + h), empty_key, *key);
// the winner thread proceeds with writing the rest fo the keys
if (old == empty_key) {
uint32_t offset = h + entry_count;
for (size_t i = 1; i < key_count; ++i) {
*reinterpret_cast<T*>(groups_buffer + offset) = key[i];
offset += entry_count;
}
}
__threadfence();
// for all threads except the winning thread, memory content of the keys
// related to the hash offset are checked again. In case of a complete match
// the hash offset is returned, otherwise -1 is returned
if (old != empty_key) {
uint32_t offset = h;
for (uint32_t i = 0; i < key_count; ++i) {
if (*reinterpret_cast<T*>(groups_buffer + offset) != key[i]) {
return -1;
}
offset += entry_count;
}
}
return h;
}
extern "C" __device__ int32_t
get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width) {
switch (key_width) {
case 4:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned int*>(key),
key_count);
case 8:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count);
default:
return -1;
}
}
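// Columnar group-by layout: key column i of hash slot h lives at
// groups_buffer[h + i * entry_count], so slots of the same key column are contiguous.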
extern "C" __device__ int64_t* get_matching_group_value_columnar(
int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_qw_count,
const size_t entry_count) {
uint32_t off = h;
{
const uint64_t old = atomicCAS(
reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key);
if (EMPTY_KEY_64 == old) {
for (size_t i = 0; i < key_qw_count; ++i) {
groups_buffer[off] = key[i];
off += entry_count;
}
return &groups_buffer[off];
}
}
__syncthreads();
off = h;
for (size_t i = 0; i < key_qw_count; ++i) {
if (groups_buffer[off] != key[i]) {
return NULL;
}
off += entry_count;
}
return &groups_buffer[off];
}
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "MurmurHash.cpp"
#include "TopKRuntime.cpp"
__device__ int64_t atomicMax64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
__device__ int64_t atomicMin64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
// As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute
// capability >= 6.0.
#if TORCH_HIP_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600)
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__ double atomicMax(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(max(val, __longlong_as_double(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicMax(float* address, float val) {
int* address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
__device__ double atomicMin(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicMin(float* address, float val) {
int* address_as_ull = (int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
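// COUNT aggregates bump only the low 32 bits of the 64-bit slot
// (32-bit atomicAdd on the reinterpreted pointer).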
extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) {
return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL));
}
extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) {
return atomicAdd(agg, 1UL);
}
extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) {
return agg_count_shared(agg, val);
}
extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) {
return agg_count_int32_shared(agg, val);
}
extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) {
return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val);
}
extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) {
return atomicAdd(agg, val);
}
extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) {
atomicAdd(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) {
atomicAdd(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) {
atomicMax64(agg, val);
}
extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) {
atomicMax(agg, val);
}
extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) {
atomicMax(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) {
atomicMax(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) {
atomicMin64(agg, val);
}
extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) {
atomicMin(agg, val);
}
// TODO(Saman): use 16-bit atomicCAS for Turing
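// 16-bit (and, below, 8-bit) atomics are emulated with atomicCAS on the 4-byte-aligned
// word containing the target: the sub-word lane is selected from the low address bits
// ((agg & 0x2) picks the half-word; __byte_perm splices single bytes), the new value is
// merged in, and the CAS is retried until it lands.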
extern "C" __device__ void atomicMax16(int16_t* agg, const int16_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
swap_value =
(reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<unsigned int>(max(static_cast<int16_t>(old_value >> 16), val))
<< 16 |
(old_value & 0xFFFF)
: (old_value & 0xFFFF0000) |
static_cast<unsigned int>(
max(static_cast<int16_t>(old_value & 0xFFFF), val));
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (old_value != compare_value);
}
extern "C" __device__ void atomicMax8(int8_t* agg, const int8_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
// __byte_perm(unsigned int A, unsigned int B, unsigned int s):
// if s == 0x3214 returns {A[31..24], A[23..16], A[15..8], B[7..0]}
// if s == 0x3240 returns {A[31..24], A[23..16], B[7...0], A[7..0]}
// if s == 0x3410 returns {A[31..24], B[7....0], A[15..8], A[7..0]}
// if s == 0x4210 returns {B[7....0], A[23..16], A[15..8], A[7..0]}
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
auto max_value = static_cast<unsigned int>(
// compare val with its corresponding bits in the compare_value
max(val,
static_cast<int8_t>(__byte_perm(
compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440))));
swap_value = __byte_perm(
compare_value, max_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]);
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (compare_value != old_value);
}
extern "C" __device__ void atomicMin16(int16_t* agg, const int16_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
swap_value =
(reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<unsigned int>(min(static_cast<int16_t>(old_value >> 16), val))
<< 16 |
(old_value & 0xFFFF)
: (old_value & 0xFFFF0000) |
static_cast<unsigned int>(
min(static_cast<int16_t>(old_value & 0xFFFF), val));
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (old_value != compare_value);
}
extern "C" __device__ void atomicMin16SkipVal(int16_t* agg,
const int16_t val,
const int16_t skip_val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
int16_t selected_old_val = (reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<int16_t>(old_value >> 16)
: static_cast<int16_t>(old_value & 0xFFFF);
swap_value =
(reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<unsigned int>(
selected_old_val == skip_val ? val : min(selected_old_val, val))
<< 16 |
(old_value & 0xFFFF)
: (old_value & 0xFFFF0000) |
static_cast<unsigned int>(
selected_old_val == skip_val ? val : min(selected_old_val, val));
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (old_value != compare_value);
}
extern "C" __device__ void atomicMin8(int8_t* agg, const int8_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
auto min_value = static_cast<unsigned int>(
min(val,
static_cast<int8_t>(__byte_perm(
compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440))));
swap_value = __byte_perm(
compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]);
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (compare_value != old_value);
}
extern "C" __device__ void atomicMin8SkipVal(int8_t* agg,
const int8_t val,
const int8_t skip_val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
int8_t selected_old_val = static_cast<int8_t>(
__byte_perm(compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440));
auto min_value = static_cast<unsigned int>(
selected_old_val == skip_val ? val : min(val, selected_old_val));
swap_value = __byte_perm(
compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]);
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (compare_value != old_value);
}
extern "C" __device__ void agg_max_int16_shared(int16_t* agg, const int16_t val) {
return atomicMax16(agg, val);
}
extern "C" __device__ void agg_max_int8_shared(int8_t* agg, const int8_t val) {
return atomicMax8(agg, val);
}
extern "C" __device__ void agg_min_int16_shared(int16_t* agg, const int16_t val) {
return atomicMin16(agg, val);
}
extern "C" __device__ void agg_min_int8_shared(int8_t* agg, const int8_t val) {
return atomicMin8(agg, val);
}
extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) {
atomicMin(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) {
atomicMin(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) {
*agg = val;
}
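// single-value aggregate: install val with CAS; if a different non-null value is already
// present, return 15 (ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES), otherwise 0.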
extern "C" __device__ int32_t checked_single_agg_id_shared(int64_t* agg,
const int64_t val,
const int64_t null_val) {
unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg);
unsigned long long int old = *address_as_ull, assumed;
if (val == null_val) {
return 0;
}
do {
if (static_cast<int64_t>(old) != null_val) {
if (static_cast<int64_t>(old) != val) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, val);
} while (assumed != old);
return 0;
}
#define DEF_AGG_ID_INT_SHARED(n) \
extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \
const int##n##_t val) { \
*agg = val; \
}
DEF_AGG_ID_INT_SHARED(32)
DEF_AGG_ID_INT_SHARED(16)
DEF_AGG_ID_INT_SHARED(8)
#undef DEF_AGG_ID_INT_SHARED
extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) {
*agg = *(reinterpret_cast<const int64_t*>(&val));
}
extern "C" __device__ int32_t checked_single_agg_id_double_shared(int64_t* agg,
const double val,
const double null_val) {
unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg);
unsigned long long int old = *address_as_ull, assumed;
if (val == null_val) {
return 0;
}
do {
if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) {
if (static_cast<int64_t>(old) != __double_as_longlong(val)) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
} while (assumed != old);
return 0;
}
extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) {
*agg = *(reinterpret_cast<const int64_t*>(val));
}
extern "C" __device__ int32_t
checked_single_agg_id_double_shared_slow(int64_t* agg,
const double* valp,
const double null_val) {
unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg);
unsigned long long int old = *address_as_ull, assumed;
double val = *valp;
if (val == null_val) {
return 0;
}
do {
if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) {
if (static_cast<int64_t>(old) != __double_as_longlong(val)) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
} while (assumed != old);
return 0;
}
extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) {
*agg = __float_as_int(val);
}
extern "C" __device__ int32_t checked_single_agg_id_float_shared(int32_t* agg,
const float val,
const float null_val) {
int* address_as_ull = reinterpret_cast<int*>(agg);
int old = *address_as_ull, assumed;
if (val == null_val) {
return 0;
}
do {
if (old != __float_as_int(null_val)) {
if (old != __float_as_int(val)) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(val));
} while (assumed != old);
return 0;
}
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return 0; \
}
#define DATA_T int64_t
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count)
#undef DATA_T
#undef ADDR_T
#define DATA_T int32_t
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_int32)
#undef DATA_T
#undef ADDR_T
// Initial value for nullable column is INT32_MIN
extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
agg_max_int32_shared(agg, val);
}
}
extern "C" __device__ void agg_max_int16_skip_val_shared(int16_t* agg,
const int16_t val,
const int16_t skip_val) {
if (val != skip_val) {
agg_max_int16_shared(agg, val);
}
}
extern "C" __device__ void agg_min_int16_skip_val_shared(int16_t* agg,
const int16_t val,
const int16_t skip_val) {
if (val != skip_val) {
atomicMin16SkipVal(agg, val, skip_val);
}
}
extern "C" __device__ void agg_max_int8_skip_val_shared(int8_t* agg,
const int8_t val,
const int8_t skip_val) {
if (val != skip_val) {
agg_max_int8_shared(agg, val);
}
}
extern "C" __device__ void agg_min_int8_skip_val_shared(int8_t* agg,
const int8_t val,
const int8_t skip_val) {
if (val != skip_val) {
atomicMin8SkipVal(agg, val, skip_val);
}
}
__device__ int32_t atomicMin32SkipVal(int32_t* address,
int32_t val,
const int32_t skip_val) {
int32_t old = atomicExch(address, INT_MAX);
return atomicMin(address, old == skip_val ? val : min(old, val));
}
extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
atomicMin32SkipVal(agg, val, skip_val);
}
}
__device__ int32_t atomicSum32SkipVal(int32_t* address,
const int32_t val,
const int32_t skip_val) {
unsigned int* address_as_int = (unsigned int*)address;
int32_t old = atomicExch(address_as_int, 0);
int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
const int32_t old = atomicSum32SkipVal(agg, val, skip_val);
return old;
}
return 0;
}
__device__ int64_t atomicSum64SkipVal(int64_t* address,
const int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
int64_t old = atomicExch(address_as_ull, 0);
int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
return atomicSum64SkipVal(agg, val, skip_val);
}
return 0;
}
__device__ int64_t atomicMin64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMin64SkipVal(agg, val, skip_val);
}
}
__device__ int64_t atomicMax64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMax64SkipVal(agg, val, skip_val);
}
}
#undef DEF_SKIP_AGG
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return *agg; \
}
#define DATA_T double
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count_double)
#undef ADDR_T
#undef DATA_T
#define DATA_T float
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_float)
#undef ADDR_T
#undef DATA_T
// Initial value for nullable column is FLOAT_MIN
extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX);
atomicMax(reinterpret_cast<float*>(agg),
__float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val));
}
}
__device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) {
float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX);
return atomicMin(
reinterpret_cast<float*>(address),
__float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val));
}
extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicMinFltSkipVal(agg, val, skip_val);
}
}
__device__ void atomicSumFltSkipVal(float* address,
const float val,
const float skip_val) {
float old = atomicExch(address, 0.f);
atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val);
}
}
__device__ void atomicSumDblSkipVal(double* address,
const double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.)));
atomicAdd(
address,
__double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (__double_as_longlong(val) != __double_as_longlong(skip_val)) {
atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMinDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull =
*reinterpret_cast<const unsigned long long*>(&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *reinterpret_cast<unsigned long long*>(&val)
: __double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMaxDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *((unsigned long long int*)&val)
: __double_as_longlong(max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
#undef DEF_SKIP_AGG
extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot,
int64_t new_val,
int64_t init_val) {
auto slot_address = reinterpret_cast<unsigned long long int*>(slot);
const auto empty_key =
static_cast<unsigned long long int*>(static_cast<void*>(&init_val));
const auto new_val_cast =
static_cast<unsigned long long int*>(static_cast<void*>(&new_val));
const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast);
if (old_val == *empty_key) {
return true;
} else {
return false;
}
}
extern "C" __device__ bool slotEmptyKeyCAS_int32(int32_t* slot,
int32_t new_val,
int32_t init_val) {
unsigned int* slot_address = reinterpret_cast<unsigned int*>(slot);
unsigned int compare_value = static_cast<unsigned int>(init_val);
unsigned int swap_value = static_cast<unsigned int>(new_val);
const unsigned int old_value = atomicCAS(slot_address, compare_value, swap_value);
return old_value == compare_value;
}
extern "C" __device__ bool slotEmptyKeyCAS_int16(int16_t* slot,
int16_t new_val,
int16_t init_val) {
unsigned int* base_slot_address =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3);
unsigned int old_value = *base_slot_address;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
// exit criteria: if init_val does not exist in the slot (some other thread has
// succeeded)
if (static_cast<unsigned int>(init_val) !=
__byte_perm(
compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x2 ? 0x3244 : 0x4410))) {
return false;
}
swap_value = __byte_perm(compare_value,
static_cast<unsigned int>(new_val),
(reinterpret_cast<size_t>(slot) & 0x2) ? 0x5410 : 0x3254);
old_value = atomicCAS(base_slot_address, compare_value, swap_value);
} while (compare_value != old_value);
return true;
}
extern "C" __device__ bool slotEmptyKeyCAS_int8(int8_t* slot,
int8_t new_val,
int8_t init_val) {
// properly align the slot address:
unsigned int* base_slot_address =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3);
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_slot_address;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
// exit criteria: if init_val does not exist in the slot (some other thread has
// succeeded)
if (static_cast<unsigned int>(init_val) !=
__byte_perm(compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x3) | 0x4440)) {
return false;
}
swap_value = __byte_perm(compare_value,
static_cast<unsigned int>(new_val),
byte_permutations[reinterpret_cast<size_t>(slot) & 0x3]);
old_value = atomicCAS(base_slot_address, compare_value, swap_value);
} while (compare_value != old_value);
return true;
}
#include "../Utils/ChunkIter.cpp"
#include "DateTruncate.cpp"
#include "ExtractFromTime.cpp"
#define EXECUTE_INCLUDE
#include "ArrayOps.cpp"
#include "DateAdd.cpp"
#include "StringFunctions.cpp"
#undef EXECUTE_INCLUDE
#include "../Utils/Regexp.cpp"
#include "../Utils/StringLike.cpp"
extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) {
// TODO(alex): de-dup, the x64 version is basically identical
ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_);
VarlenDatum vd;
bool is_end;
ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end);
return vd.is_null ? 0
: (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) |
(static_cast<uint64_t>(vd.length) << 48);
}
extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap,
const uint32_t bitmap_bytes,
const uint8_t* key_bytes,
const uint32_t key_len) {
const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
const uint32_t word_idx = bit_pos / 32;
const uint32_t bit_idx = bit_pos % 32;
atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
}
extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
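  // One bit per distinct value: bitmap_idx selects the bit, byte_idx/word_idx locate the
  // 32-bit word it lives in, and each thread writes into one of sub_bitmap_count
  // sub-bitmaps (selected by threadIdx.x) to reduce atomic contention.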
const uint64_t bitmap_idx = val - min_val;
const uint32_t byte_idx = bitmap_idx >> 3;
const uint32_t word_idx = byte_idx >> 2;
const uint32_t byte_word_idx = byte_idx & 3;
const int64_t host_addr = *agg;
uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr +
(threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes);
switch (byte_word_idx) {
case 0:
atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7));
break;
case 1:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8));
break;
case 2:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16));
break;
case 3:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24));
break;
default:
break;
}
}
extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu(
int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t skip_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
if (val != skip_val) {
agg_count_distinct_bitmap_gpu(
agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes);
}
}
extern "C" __device__ void agg_approximate_count_distinct_gpu(
int64_t* agg,
const int64_t key,
const uint32_t b,
const int64_t base_dev_addr,
const int64_t base_host_addr) {
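  // HyperLogLog update: the top b bits of the 64-bit hash pick the register, and the
  // rank of the remaining bits (see HyperLogLogRank.h) is the candidate register value;
  // registers only ever grow, hence the atomicMax.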
const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
const uint32_t index = hash >> (64 - b);
const int32_t rank = get_rank(hash << b, 64 - b);
const int64_t host_addr = *agg;
int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr);
atomicMax(&M[index], rank);
}
extern "C" __device__ void force_sync() {
__threadfence_block();
}
extern "C" __device__ void sync_warp() {
#if (TORCH_HIP_VERSION >= 9000)
__syncwarp();
#endif
}
/**
 * Protected warp synchronization to make sure that all (or none) of the threads within a
 * warp go through the synchronization barrier.
 * thread_pos: the current thread position, used for a memory access
 * row_count: maximum number of rows to be processed
 * The function performs a warp sync iff all 32 threads within that warp will process
 * valid data. NOTE: it currently assumes that the warp size is 32.
*/
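// Example (assuming warp-aligned thread positions): with row_count = 100 the last valid
// row index is 99, so ((99 | 0x1F) - thread_pos) >= 32 holds for thread_pos <= 95; the
// first three full warps synchronize, while the partial warp covering rows 96..99 skips
// the barrier to avoid deadlocking on its inactive lanes.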
extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) {
#if (TORCH_HIP_VERSION >= 9000)
// only syncing if NOT within the same warp as those threads experiencing the critical
// edge
if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) {
__syncwarp();
}
#endif
}
extern "C" __device__ void sync_threadblock() {
__syncthreads();
}
/*
 * Currently, we just use this function for handling non-grouped aggregates
 * with COUNT queries (with GPU shared memory used). Later, we should generate code for
 * this depending on the type of aggregate function.
 * TODO: we should use one contiguous global memory buffer, rather than the current
 * default behaviour of multiple buffers, each for one aggregate. Once that's resolved,
 * we can make this function much cleaner.
*/
extern "C" __device__ void write_back_non_grouped_agg(int64_t* input_buffer,
int64_t* output_buffer,
const int32_t agg_idx) {
if (threadIdx.x == agg_idx) {
agg_sum_shared(output_buffer, input_buffer[agg_idx]);
}
}
| 26b704ef8f21cf1718bd4efa01c5914a186af371.cu | #include <cuda.h>
#include <float.h>
#include <stdint.h>
#include <stdio.h>
#include <limits>
#include "BufferCompaction.h"
#include "ExtensionFunctions.hpp"
#include "GpuRtConstants.h"
#include "HyperLogLogRank.h"
#include "TableFunctions/TableFunctions.hpp"
extern "C" __device__ int64_t get_thread_index() {
return threadIdx.x;
}
extern "C" __device__ int64_t get_block_index() {
return blockIdx.x;
}
extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) {
return blockIdx.x * blockDim.x + threadIdx.x;
}
extern "C" __device__ int32_t group_buff_idx_impl() {
return pos_start_impl(NULL);
}
extern "C" __device__ int32_t pos_step_impl() {
return blockDim.x * gridDim.x;
}
extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) {
return threadIdx.x % warp_sz;
}
extern "C" __device__ const int64_t* init_shared_mem_nop(
const int64_t* groups_buffer,
const int32_t groups_buffer_size) {
return groups_buffer;
}
extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) {
}
/*
* Just declares and returns a dynamic shared memory pointer. Total size should be
* properly set during kernel launch
*/
extern "C" __device__ int64_t* declare_dynamic_shared_memory() {
extern __shared__ int64_t shared_mem_buffer[];
return shared_mem_buffer;
}
/**
* Initializes the shared memory buffer for perfect hash group by.
* In this function, we simply copy the global group by buffer (already initialized on the
* host and transferred) to all shared memory group by buffers.
*/
extern "C" __device__ const int64_t* init_shared_mem(const int64_t* global_groups_buffer,
const int32_t groups_buffer_size) {
// dynamic shared memory declaration
extern __shared__ int64_t shared_groups_buffer[];
  // it is assumed that the buffer size is a multiple of 64-bit units,
  // so it is safe to have each thread copy one 64-bit word at a time
const int32_t buffer_units = groups_buffer_size >> 3;
for (int32_t pos = threadIdx.x; pos < buffer_units; pos += blockDim.x) {
shared_groups_buffer[pos] = global_groups_buffer[pos];
}
__syncthreads();
return shared_groups_buffer;
}
#define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu
#include "GpuInitGroups.cu"
#undef init_group_by_buffer_gpu_impl
// Dynamic watchdog: monitoring up to 128 SMs. E.g. a GP100 config may have 60:
// 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors
// TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA
__device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel
// TODO(Saman): make this cycle budget something constant in codegen level
__device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel
__device__ int32_t dw_abort = 0; // TBD: set from host (async)
__device__ int32_t runtime_interrupt_flag = 0;
__inline__ __device__ uint32_t get_smid(void) {
uint32_t ret;
asm("mov.u32 %0, %%smid;" : "=r"(ret));
return ret;
}
/*
* The main objective of this function is to return true, if any of the following two
* scenarios happen:
* 1. receives a host request for aborting the kernel execution
* 2. kernel execution takes longer clock cycles than it was initially allowed
* The assumption is that all (or none) threads within a block return true for the
* watchdog, and the first thread within each block compares the recorded clock cycles for
* its occupying SM with the allowed budget. It also assumes that all threads entering
* this function are active (no critical edge exposure)
* NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global
* memory scope.
*/
extern "C" __device__ bool dynamic_watchdog() {
// check for dynamic watchdog, if triggered all threads return true
if (dw_cycle_budget == 0LL) {
return false; // Uninitialized watchdog can't check time
}
if (dw_abort == 1) {
return true; // Received host request to abort
}
uint32_t smid = get_smid();
if (smid >= 128) {
return false;
}
__shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start
__shared__ volatile bool
dw_should_terminate; // all threads within a block should return together if
                            // the watchdog criterion is met
  // thread 0 either initializes or reads the initial clock cycle; the result is stored
  // in shared memory. Since all threads within a block share the same SM, there's no
  // point in using more threads here.
if (threadIdx.x == 0) {
dw_block_cycle_start = 0LL;
int64_t cycle_count = static_cast<int64_t>(clock64());
// Make sure the block hasn't switched SMs
if (smid == get_smid()) {
dw_block_cycle_start = static_cast<int64_t>(
atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]),
0ULL,
static_cast<unsigned long long>(cycle_count)));
}
int64_t cycles = cycle_count - dw_block_cycle_start;
if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) &&
(cycles > dw_cycle_budget)) {
// Check if we're out of time on this particular SM
dw_should_terminate = true;
} else {
dw_should_terminate = false;
}
}
__syncthreads();
return dw_should_terminate;
}
extern "C" __device__ bool check_interrupt() {
return (runtime_interrupt_flag == 1) ? true : false;
}
template <typename T = unsigned long long>
inline __device__ T get_empty_key() {
return EMPTY_KEY_64;
}
template <>
inline __device__ unsigned int get_empty_key() {
return EMPTY_KEY_32;
}
template <typename T>
inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const T* key,
const uint32_t key_count,
const uint32_t row_size_quad) {
const T empty_key = get_empty_key<T>();
uint32_t off = h * row_size_quad;
auto row_ptr = reinterpret_cast<T*>(groups_buffer + off);
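  // Claim the entry by CAS'ing the first key column from the empty sentinel to the new
  // key; the winning thread then writes the remaining key columns, while other threads
  // spin below until the full key is visible before comparing it.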
{
const T old = atomicCAS(row_ptr, empty_key, *key);
if (empty_key == old && key_count > 1) {
for (size_t i = 1; i <= key_count - 1; ++i) {
atomicExch(row_ptr + i, key[i]);
}
}
}
if (key_count > 1) {
while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) {
// spin until the winning thread has finished writing the entire key and the init
// value
}
}
bool match = true;
for (uint32_t i = 0; i < key_count; ++i) {
if (row_ptr[i] != key[i]) {
match = false;
break;
}
}
if (match) {
auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count);
return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8));
}
return NULL;
}
extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width,
const uint32_t row_size_quad,
const int64_t* init_vals) {
switch (key_width) {
case 4:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned int*>(key),
key_count,
row_size_quad);
case 8:
return get_matching_group_value(groups_buffer,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count,
row_size_quad);
default:
return NULL;
}
}
template <typename T>
__device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const T* key,
const uint32_t key_count) {
const T empty_key = get_empty_key<T>();
const uint64_t old =
atomicCAS(reinterpret_cast<T*>(groups_buffer + h), empty_key, *key);
  // the winner thread proceeds with writing the rest of the keys
if (old == empty_key) {
uint32_t offset = h + entry_count;
for (size_t i = 1; i < key_count; ++i) {
*reinterpret_cast<T*>(groups_buffer + offset) = key[i];
offset += entry_count;
}
}
__threadfence();
  // for all threads except the winning thread, the memory contents of the keys
  // related to the hash offset are checked again. In case of a complete match
  // the hash offset is returned; otherwise -1 is returned
if (old != empty_key) {
uint32_t offset = h;
for (uint32_t i = 0; i < key_count; ++i) {
if (*reinterpret_cast<T*>(groups_buffer + offset) != key[i]) {
return -1;
}
offset += entry_count;
}
}
return h;
}
extern "C" __device__ int32_t
get_matching_group_value_columnar_slot(int64_t* groups_buffer,
const uint32_t entry_count,
const uint32_t h,
const int64_t* key,
const uint32_t key_count,
const uint32_t key_width) {
switch (key_width) {
case 4:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned int*>(key),
key_count);
case 8:
return get_matching_group_value_columnar_slot(
groups_buffer,
entry_count,
h,
reinterpret_cast<const unsigned long long*>(key),
key_count);
default:
return -1;
}
}
extern "C" __device__ int64_t* get_matching_group_value_columnar(
int64_t* groups_buffer,
const uint32_t h,
const int64_t* key,
const uint32_t key_qw_count,
const size_t entry_count) {
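  // Columnar layout: the i-th component of the key for entry h is stored at
  // groups_buffer[h + i * entry_count], so consecutive components are entry_count apart.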
uint32_t off = h;
{
const uint64_t old = atomicCAS(
reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key);
if (EMPTY_KEY_64 == old) {
for (size_t i = 0; i < key_qw_count; ++i) {
groups_buffer[off] = key[i];
off += entry_count;
}
return &groups_buffer[off];
}
}
__syncthreads();
off = h;
for (size_t i = 0; i < key_qw_count; ++i) {
if (groups_buffer[off] != key[i]) {
return NULL;
}
off += entry_count;
}
return &groups_buffer[off];
}
#include "GroupByRuntime.cpp"
#include "JoinHashTableQueryRuntime.cpp"
#include "MurmurHash.cpp"
#include "TopKRuntime.cpp"
__device__ int64_t atomicMax64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
__device__ int64_t atomicMin64(int64_t* address, int64_t val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
// As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute
// capability >= 6.0.
#if CUDA_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600)
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__ double atomicMax(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(max(val, __longlong_as_double(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ float atomicMax(float* address, float val) {
int* address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
__device__ double atomicMin(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
__double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ double atomicMin(float* address, float val) {
int* address_as_ull = (int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) {
return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL));
}
extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) {
return atomicAdd(agg, 1UL);
}
extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) {
return agg_count_shared(agg, val);
}
extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) {
return agg_count_int32_shared(agg, val);
}
extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) {
return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val);
}
extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) {
return atomicAdd(agg, val);
}
extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) {
atomicAdd(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) {
atomicAdd(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) {
atomicMax64(agg, val);
}
extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) {
atomicMax(agg, val);
}
extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) {
atomicMax(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) {
atomicMax(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) {
atomicMin64(agg, val);
}
extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) {
atomicMin(agg, val);
}
// TODO(Saman): use 16-bit atomicCAS for Turing
extern "C" __device__ void atomicMax16(int16_t* agg, const int16_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
swap_value =
(reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<unsigned int>(max(static_cast<int16_t>(old_value >> 16), val))
<< 16 |
(old_value & 0xFFFF)
: (old_value & 0xFFFF0000) |
static_cast<unsigned int>(
max(static_cast<int16_t>(old_value & 0xFFFF), val));
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (old_value != compare_value);
}
extern "C" __device__ void atomicMax8(int8_t* agg, const int8_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
// __byte_perm(unsigned int A, unsigned int B, unsigned int s):
// if s == 0x3214 returns {A[31..24], A[23..16], A[15..8], B[7..0]}
// if s == 0x3240 returns {A[31..24], A[23..16], B[7...0], A[7..0]}
// if s == 0x3410 returns {A[31..24], B[7....0], A[15..8], A[7..0]}
// if s == 0x4210 returns {B[7....0], A[23..16], A[15..8], A[7..0]}
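  // e.g. for an int8 aggregate at byte offset 1 the selector 0x3240 is used, which
  // splices the new byte (B[7..0]) into byte 1 of the word while keeping bytes 0, 2
  // and 3 of the old value.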
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
auto max_value = static_cast<unsigned int>(
// compare val with its corresponding bits in the compare_value
max(val,
static_cast<int8_t>(__byte_perm(
compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440))));
swap_value = __byte_perm(
compare_value, max_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]);
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (compare_value != old_value);
}
extern "C" __device__ void atomicMin16(int16_t* agg, const int16_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
swap_value =
(reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<unsigned int>(min(static_cast<int16_t>(old_value >> 16), val))
<< 16 |
(old_value & 0xFFFF)
: (old_value & 0xFFFF0000) |
static_cast<unsigned int>(
min(static_cast<int16_t>(old_value & 0xFFFF), val));
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (old_value != compare_value);
}
extern "C" __device__ void atomicMin16SkipVal(int16_t* agg,
const int16_t val,
const int16_t skip_val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
int16_t selected_old_val = (reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<int16_t>(old_value >> 16)
: static_cast<int16_t>(old_value & 0xFFFF);
swap_value =
(reinterpret_cast<size_t>(agg) & 0x2)
? static_cast<unsigned int>(
selected_old_val == skip_val ? val : min(selected_old_val, val))
<< 16 |
(old_value & 0xFFFF)
: (old_value & 0xFFFF0000) |
static_cast<unsigned int>(
selected_old_val == skip_val ? val : min(selected_old_val, val));
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (old_value != compare_value);
}
extern "C" __device__ void atomicMin8(int8_t* agg, const int8_t val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
auto min_value = static_cast<unsigned int>(
min(val,
static_cast<int8_t>(__byte_perm(
compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440))));
swap_value = __byte_perm(
compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]);
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (compare_value != old_value);
}
extern "C" __device__ void atomicMin8SkipVal(int8_t* agg,
const int8_t val,
const int8_t skip_val) {
// properly align the input pointer:
unsigned int* base_address_u32 =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3);
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_address_u32;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
int8_t selected_old_val = static_cast<int8_t>(
__byte_perm(compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440));
auto min_value = static_cast<unsigned int>(
selected_old_val == skip_val ? val : min(val, selected_old_val));
swap_value = __byte_perm(
compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]);
old_value = atomicCAS(base_address_u32, compare_value, swap_value);
} while (compare_value != old_value);
}
extern "C" __device__ void agg_max_int16_shared(int16_t* agg, const int16_t val) {
return atomicMax16(agg, val);
}
extern "C" __device__ void agg_max_int8_shared(int8_t* agg, const int8_t val) {
return atomicMax8(agg, val);
}
extern "C" __device__ void agg_min_int16_shared(int16_t* agg, const int16_t val) {
return atomicMin16(agg, val);
}
extern "C" __device__ void agg_min_int8_shared(int8_t* agg, const int8_t val) {
return atomicMin8(agg, val);
}
extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) {
atomicMin(reinterpret_cast<double*>(agg), val);
}
extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) {
atomicMin(reinterpret_cast<float*>(agg), val);
}
extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) {
*agg = val;
}
extern "C" __device__ int32_t checked_single_agg_id_shared(int64_t* agg,
const int64_t val,
const int64_t null_val) {
unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg);
unsigned long long int old = *address_as_ull, assumed;
if (val == null_val) {
return 0;
}
do {
if (static_cast<int64_t>(old) != null_val) {
if (static_cast<int64_t>(old) != val) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, val);
} while (assumed != old);
return 0;
}
#define DEF_AGG_ID_INT_SHARED(n) \
extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \
const int##n##_t val) { \
*agg = val; \
}
DEF_AGG_ID_INT_SHARED(32)
DEF_AGG_ID_INT_SHARED(16)
DEF_AGG_ID_INT_SHARED(8)
#undef DEF_AGG_ID_INT_SHARED
extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) {
*agg = *(reinterpret_cast<const int64_t*>(&val));
}
extern "C" __device__ int32_t checked_single_agg_id_double_shared(int64_t* agg,
const double val,
const double null_val) {
unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg);
unsigned long long int old = *address_as_ull, assumed;
if (val == null_val) {
return 0;
}
do {
if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) {
if (static_cast<int64_t>(old) != __double_as_longlong(val)) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
} while (assumed != old);
return 0;
}
extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) {
*agg = *(reinterpret_cast<const int64_t*>(val));
}
extern "C" __device__ int32_t
checked_single_agg_id_double_shared_slow(int64_t* agg,
const double* valp,
const double null_val) {
unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(agg);
unsigned long long int old = *address_as_ull, assumed;
double val = *valp;
if (val == null_val) {
return 0;
}
do {
if (static_cast<int64_t>(old) != __double_as_longlong(null_val)) {
if (static_cast<int64_t>(old) != __double_as_longlong(val)) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val));
} while (assumed != old);
return 0;
}
extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) {
*agg = __float_as_int(val);
}
extern "C" __device__ int32_t checked_single_agg_id_float_shared(int32_t* agg,
const float val,
const float null_val) {
int* address_as_ull = reinterpret_cast<int*>(agg);
int old = *address_as_ull, assumed;
if (val == null_val) {
return 0;
}
do {
if (old != __float_as_int(null_val)) {
if (old != __float_as_int(val)) {
// see Execute::ERR_SINGLE_VALUE_FOUND_MULTIPLE_VALUES
return 15;
} else {
break;
}
}
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(val));
} while (assumed != old);
return 0;
}
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return 0; \
}
#define DATA_T int64_t
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count)
#undef DATA_T
#undef ADDR_T
#define DATA_T int32_t
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_int32)
#undef DATA_T
#undef ADDR_T
// Initial value for nullable column is INT32_MIN
extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
agg_max_int32_shared(agg, val);
}
}
extern "C" __device__ void agg_max_int16_skip_val_shared(int16_t* agg,
const int16_t val,
const int16_t skip_val) {
if (val != skip_val) {
agg_max_int16_shared(agg, val);
}
}
extern "C" __device__ void agg_min_int16_skip_val_shared(int16_t* agg,
const int16_t val,
const int16_t skip_val) {
if (val != skip_val) {
atomicMin16SkipVal(agg, val, skip_val);
}
}
extern "C" __device__ void agg_max_int8_skip_val_shared(int8_t* agg,
const int8_t val,
const int8_t skip_val) {
if (val != skip_val) {
agg_max_int8_shared(agg, val);
}
}
extern "C" __device__ void agg_min_int8_skip_val_shared(int8_t* agg,
const int8_t val,
const int8_t skip_val) {
if (val != skip_val) {
atomicMin8SkipVal(agg, val, skip_val);
}
}
__device__ int32_t atomicMin32SkipVal(int32_t* address,
int32_t val,
const int32_t skip_val) {
int32_t old = atomicExch(address, INT_MAX);
return atomicMin(address, old == skip_val ? val : min(old, val));
}
extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
atomicMin32SkipVal(agg, val, skip_val);
}
}
__device__ int32_t atomicSum32SkipVal(int32_t* address,
const int32_t val,
const int32_t skip_val) {
unsigned int* address_as_int = (unsigned int*)address;
int32_t old = atomicExch(address_as_int, 0);
int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg,
const int32_t val,
const int32_t skip_val) {
if (val != skip_val) {
const int32_t old = atomicSum32SkipVal(agg, val, skip_val);
return old;
}
return 0;
}
__device__ int64_t atomicSum64SkipVal(int64_t* address,
const int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
int64_t old = atomicExch(address_as_ull, 0);
int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old));
return old == skip_val ? old2 : (old2 + old);
}
extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
return atomicSum64SkipVal(agg, val, skip_val);
}
return 0;
}
__device__ int64_t atomicMin64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : min((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMin64SkipVal(agg, val, skip_val);
}
}
__device__ int64_t atomicMax64SkipVal(int64_t* address,
int64_t val,
const int64_t skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val ? val : max((long long)val, (long long)assumed));
} while (assumed != old);
return old;
}
extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg,
const int64_t val,
const int64_t skip_val) {
if (val != skip_val) {
atomicMax64SkipVal(agg, val, skip_val);
}
}
#undef DEF_SKIP_AGG
#define DEF_SKIP_AGG(base_agg_func) \
extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \
ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \
if (val != skip_val) { \
return base_agg_func##_shared(agg, val); \
} \
return *agg; \
}
#define DATA_T double
#define ADDR_T uint64_t
DEF_SKIP_AGG(agg_count_double)
#undef ADDR_T
#undef DATA_T
#define DATA_T float
#define ADDR_T uint32_t
DEF_SKIP_AGG(agg_count_float)
#undef ADDR_T
#undef DATA_T
// Initial value for nullable column is FLOAT_MIN
extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX);
atomicMax(reinterpret_cast<float*>(agg),
__float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val));
}
}
__device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) {
float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX);
return atomicMin(
reinterpret_cast<float*>(address),
__float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val));
}
extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicMinFltSkipVal(agg, val, skip_val);
}
}
__device__ void atomicSumFltSkipVal(float* address,
const float val,
const float skip_val) {
float old = atomicExch(address, 0.f);
atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg,
const float val,
const float skip_val) {
if (__float_as_int(val) != __float_as_int(skip_val)) {
atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val);
}
}
__device__ void atomicSumDblSkipVal(double* address,
const double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.)));
atomicAdd(
address,
__double_as_longlong(old) == __double_as_longlong(skip_val) ? val : (val + old));
}
extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (__double_as_longlong(val) != __double_as_longlong(skip_val)) {
atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMinDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull =
reinterpret_cast<unsigned long long int*>(address);
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull =
*reinterpret_cast<const unsigned long long*>(&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *reinterpret_cast<unsigned long long*>(&val)
: __double_as_longlong(min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
__device__ double atomicMaxDblSkipVal(double* address,
double val,
const double skip_val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val);
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull,
assumed,
assumed == skip_val_as_ull
? *((unsigned long long int*)&val)
: __double_as_longlong(max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg,
const double val,
const double skip_val) {
if (val != skip_val) {
atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val);
}
}
#undef DEF_SKIP_AGG
extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot,
int64_t new_val,
int64_t init_val) {
auto slot_address = reinterpret_cast<unsigned long long int*>(slot);
const auto empty_key =
static_cast<unsigned long long int*>(static_cast<void*>(&init_val));
const auto new_val_cast =
static_cast<unsigned long long int*>(static_cast<void*>(&new_val));
const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast);
if (old_val == *empty_key) {
return true;
} else {
return false;
}
}
extern "C" __device__ bool slotEmptyKeyCAS_int32(int32_t* slot,
int32_t new_val,
int32_t init_val) {
unsigned int* slot_address = reinterpret_cast<unsigned int*>(slot);
unsigned int compare_value = static_cast<unsigned int>(init_val);
unsigned int swap_value = static_cast<unsigned int>(new_val);
const unsigned int old_value = atomicCAS(slot_address, compare_value, swap_value);
return old_value == compare_value;
}
extern "C" __device__ bool slotEmptyKeyCAS_int16(int16_t* slot,
int16_t new_val,
int16_t init_val) {
unsigned int* base_slot_address =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3);
unsigned int old_value = *base_slot_address;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
// exit criteria: if init_val does not exist in the slot (some other thread has
// succeeded)
if (static_cast<unsigned int>(init_val) !=
__byte_perm(
compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x2 ? 0x3244 : 0x4410))) {
return false;
}
swap_value = __byte_perm(compare_value,
static_cast<unsigned int>(new_val),
(reinterpret_cast<size_t>(slot) & 0x2) ? 0x5410 : 0x3254);
old_value = atomicCAS(base_slot_address, compare_value, swap_value);
} while (compare_value != old_value);
return true;
}
extern "C" __device__ bool slotEmptyKeyCAS_int8(int8_t* slot,
int8_t new_val,
int8_t init_val) {
// properly align the slot address:
unsigned int* base_slot_address =
reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3);
constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int old_value = *base_slot_address;
unsigned int swap_value, compare_value;
do {
compare_value = old_value;
// exit criteria: if init_val does not exist in the slot (some other thread has
// succeeded)
if (static_cast<unsigned int>(init_val) !=
__byte_perm(compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x3) | 0x4440)) {
return false;
}
swap_value = __byte_perm(compare_value,
static_cast<unsigned int>(new_val),
byte_permutations[reinterpret_cast<size_t>(slot) & 0x3]);
old_value = atomicCAS(base_slot_address, compare_value, swap_value);
} while (compare_value != old_value);
return true;
}
#include "../Utils/ChunkIter.cpp"
#include "DateTruncate.cpp"
#include "ExtractFromTime.cpp"
#define EXECUTE_INCLUDE
#include "ArrayOps.cpp"
#include "DateAdd.cpp"
#include "StringFunctions.cpp"
#undef EXECUTE_INCLUDE
#include "../Utils/Regexp.cpp"
#include "../Utils/StringLike.cpp"
extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) {
// TODO(alex): de-dup, the x64 version is basically identical
ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_);
VarlenDatum vd;
bool is_end;
ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end);
return vd.is_null ? 0
: (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) |
(static_cast<uint64_t>(vd.length) << 48);
}
extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap,
const uint32_t bitmap_bytes,
const uint8_t* key_bytes,
const uint32_t key_len) {
const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8);
const uint32_t word_idx = bit_pos / 32;
const uint32_t bit_idx = bit_pos % 32;
atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx);
}
extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
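  // One bit per distinct value: bitmap_idx selects the bit, byte_idx/word_idx locate the
  // 32-bit word it lives in, and each thread writes into one of sub_bitmap_count
  // sub-bitmaps (selected by threadIdx.x) to reduce atomic contention.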
const uint64_t bitmap_idx = val - min_val;
const uint32_t byte_idx = bitmap_idx >> 3;
const uint32_t word_idx = byte_idx >> 2;
const uint32_t byte_word_idx = byte_idx & 3;
const int64_t host_addr = *agg;
uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr +
(threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes);
switch (byte_word_idx) {
case 0:
atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7));
break;
case 1:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8));
break;
case 2:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16));
break;
case 3:
atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24));
break;
default:
break;
}
}
extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu(
int64_t* agg,
const int64_t val,
const int64_t min_val,
const int64_t skip_val,
const int64_t base_dev_addr,
const int64_t base_host_addr,
const uint64_t sub_bitmap_count,
const uint64_t bitmap_bytes) {
if (val != skip_val) {
agg_count_distinct_bitmap_gpu(
agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes);
}
}
extern "C" __device__ void agg_approximate_count_distinct_gpu(
int64_t* agg,
const int64_t key,
const uint32_t b,
const int64_t base_dev_addr,
const int64_t base_host_addr) {
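  // HyperLogLog update: the top b bits of the 64-bit hash pick the register, and the
  // rank of the remaining bits (see HyperLogLogRank.h) is the candidate register value;
  // registers only ever grow, hence the atomicMax.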
const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0);
const uint32_t index = hash >> (64 - b);
const int32_t rank = get_rank(hash << b, 64 - b);
const int64_t host_addr = *agg;
int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr);
atomicMax(&M[index], rank);
}
extern "C" __device__ void force_sync() {
__threadfence_block();
}
extern "C" __device__ void sync_warp() {
#if (CUDA_VERSION >= 9000)
__syncwarp();
#endif
}
/**
 * Protected warp synchronization to make sure that all (or none) of the threads within a
 * warp go through the synchronization barrier.
 * thread_pos: the current thread position, used for a memory access
 * row_count: maximum number of rows to be processed
 * The function performs a warp sync iff all 32 threads within that warp will process
 * valid data. NOTE: it currently assumes that the warp size is 32.
*/
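// Example (assuming warp-aligned thread positions): with row_count = 100 the last valid
// row index is 99, so ((99 | 0x1F) - thread_pos) >= 32 holds for thread_pos <= 95; the
// first three full warps synchronize, while the partial warp covering rows 96..99 skips
// the barrier to avoid deadlocking on its inactive lanes.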
extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) {
#if (CUDA_VERSION >= 9000)
// only syncing if NOT within the same warp as those threads experiencing the critical
// edge
if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) {
__syncwarp();
}
#endif
}
extern "C" __device__ void sync_threadblock() {
__syncthreads();
}
/*
 * Currently, we just use this function for handling non-grouped aggregates
 * with COUNT queries (with GPU shared memory used). Later, we should generate code for
 * this depending on the type of aggregate function.
 * TODO: we should use one contiguous global memory buffer, rather than the current
 * default behaviour of multiple buffers, each for one aggregate. Once that's resolved,
 * we can make this function much cleaner.
*/
extern "C" __device__ void write_back_non_grouped_agg(int64_t* input_buffer,
int64_t* output_buffer,
const int32_t agg_idx) {
if (threadIdx.x == agg_idx) {
agg_sum_shared(output_buffer, input_buffer[agg_idx]);
}
}
|
7b60024ca6780e3e4790560204976bbac9d828ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
NV12ToARGB color space conversion CUDA kernel
    This sample uses CUDA to convert a simple NV12 (YUV 4:2:0 planar)
    source into ARGB output
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cudaProcessFrame.h"
__constant__ uint32 constAlpha;
#define MUL(x,y) ((x)*(y))
__constant__ float constHueColorSpaceMat[9];
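// 3x3 row-major YUV -> RGB conversion matrix (hue adjustment folded in); expected to be
// set from the host side before the kernels are launched.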
__device__ void YUV2RGB(uint32 *yuvi, float *red, float *green, float *blue)
{
float luma, chromaCb, chromaCr;
// Prepare for hue adjustment
luma = (float)yuvi[0];
chromaCb = (float)((int32)yuvi[1] - 512.0f);
chromaCr = (float)((int32)yuvi[2] - 512.0f);
// Convert YUV To RGB with hue adjustment
*red = MUL(luma, constHueColorSpaceMat[0]) +
MUL(chromaCb, constHueColorSpaceMat[1]) +
MUL(chromaCr, constHueColorSpaceMat[2]);
*green= MUL(luma, constHueColorSpaceMat[3]) +
MUL(chromaCb, constHueColorSpaceMat[4]) +
MUL(chromaCr, constHueColorSpaceMat[5]);
*blue = MUL(luma, constHueColorSpaceMat[6]) +
MUL(chromaCb, constHueColorSpaceMat[7]) +
MUL(chromaCr, constHueColorSpaceMat[8]);
}
__device__ uint32 RGBAPACK_8bit(float red, float green, float blue, uint32 alpha)
{
uint32 ARGBpixel = 0;
    // Clamp final 8 bit results
red = min(max(red, 0.0f), 255.0f);
green = min(max(green, 0.0f), 255.0f);
blue = min(max(blue, 0.0f), 255.0f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = (((uint32)blue) |
(((uint32)green) << 8) |
(((uint32)red) << 16) | (uint32)alpha);
return ARGBpixel;
}
__device__ uint32 RGBAPACK_10bit(float red, float green, float blue, uint32 alpha)
{
uint32 ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 1023.f);
green = min(max(green, 0.0f), 1023.f);
blue = min(max(blue, 0.0f), 1023.f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = (((uint32)blue >> 2) |
(((uint32)green >> 2) << 8) |
(((uint32)red >> 2) << 16) | (uint32)alpha);
return ARGBpixel;
}
// CUDA kernel for outputting the final ARGB output from NV12
extern "C"
__global__ void Passthru_drvapi(uint32 *srcImage, size_t nSourcePitch,
uint32 *dstImage, size_t nDestPitch,
uint32 width, uint32 height)
{
int32 x, y;
uint32 yuv101010Pel[2];
uint32 processingPitch = ((width) + 63) & ~63;
uint32 dstImagePitch = nDestPitch >> 2;
uint8 *srcImageU8 = (uint8 *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]);
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);
    // this step performs the color conversion
float luma[2];
luma[0] = (yuv101010Pel[0] & 0x00FF);
luma[1] = (yuv101010Pel[1] & 0x00FF);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], constAlpha);
}
// CUDA kernel for outputting the final ARGB output from NV12
extern "C"
__global__ void NV12ToARGB_drvapi(uint32 *srcImage, size_t nSourcePitch, int nBitDepthInBytes,
uint32 *dstImage, size_t nDestPitch,
uint32 width, uint32 height)
{
int32 x, y;
uint32 yuv101010Pel[2];
uint32 processingPitch = ((width) + 63) & ~63;
uint32 dstImagePitch = nDestPitch >> 2;
uint8 *srcImageU8 = (uint8 *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)]) << 2;
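    // the luma byte read above (the most significant byte of the sample when nBitDepthInBytes == 2)
    // is promoted to 10-bit precision by the << 2 so the common 10-bit unpack path below is reused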
uint32 chromaOffset = processingPitch * height;
int32 y_chroma = y >> 1;
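    // NV12 keeps interleaved Cb/Cr at half vertical resolution right after the luma plane
    // (chromaOffset); odd luma rows interpolate between the current and the next chroma row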
if (y & 1) // odd scanline ?
{
uint32 chromaCb;
uint32 chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
    // this step performs the color conversion
uint32 yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
}
| 7b60024ca6780e3e4790560204976bbac9d828ee.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
NV12ToARGB color space conversion CUDA kernel
    This sample uses CUDA to convert a simple NV12 (YUV 4:2:0 planar)
    source to output in ARGB format
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cudaProcessFrame.h"
__constant__ uint32 constAlpha;
#define MUL(x,y) ((x)*(y))
__constant__ float constHueColorSpaceMat[9];
__device__ void YUV2RGB(uint32 *yuvi, float *red, float *green, float *blue)
{
float luma, chromaCb, chromaCr;
// Prepare for hue adjustment
luma = (float)yuvi[0];
chromaCb = (float)((int32)yuvi[1] - 512.0f);
chromaCr = (float)((int32)yuvi[2] - 512.0f);
// Convert YUV To RGB with hue adjustment
*red = MUL(luma, constHueColorSpaceMat[0]) +
MUL(chromaCb, constHueColorSpaceMat[1]) +
MUL(chromaCr, constHueColorSpaceMat[2]);
*green= MUL(luma, constHueColorSpaceMat[3]) +
MUL(chromaCb, constHueColorSpaceMat[4]) +
MUL(chromaCr, constHueColorSpaceMat[5]);
*blue = MUL(luma, constHueColorSpaceMat[6]) +
MUL(chromaCb, constHueColorSpaceMat[7]) +
MUL(chromaCr, constHueColorSpaceMat[8]);
}
__device__ uint32 RGBAPACK_8bit(float red, float green, float blue, uint32 alpha)
{
uint32 ARGBpixel = 0;
    // Clamp final 8 bit results
red = min(max(red, 0.0f), 255.0f);
green = min(max(green, 0.0f), 255.0f);
blue = min(max(blue, 0.0f), 255.0f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = (((uint32)blue) |
(((uint32)green) << 8) |
(((uint32)red) << 16) | (uint32)alpha);
return ARGBpixel;
}
__device__ uint32 RGBAPACK_10bit(float red, float green, float blue, uint32 alpha)
{
uint32 ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 1023.f);
green = min(max(green, 0.0f), 1023.f);
blue = min(max(blue, 0.0f), 1023.f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = (((uint32)blue >> 2) |
(((uint32)green >> 2) << 8) |
(((uint32)red >> 2) << 16) | (uint32)alpha);
return ARGBpixel;
}
// CUDA kernel for outputting the final ARGB output from NV12
extern "C"
__global__ void Passthru_drvapi(uint32 *srcImage, size_t nSourcePitch,
uint32 *dstImage, size_t nDestPitch,
uint32 width, uint32 height)
{
int32 x, y;
uint32 yuv101010Pel[2];
uint32 processingPitch = ((width) + 63) & ~63;
uint32 dstImagePitch = nDestPitch >> 2;
uint8 *srcImageU8 = (uint8 *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]);
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);
    // this step performs the color conversion
float luma[2];
luma[0] = (yuv101010Pel[0] & 0x00FF);
luma[1] = (yuv101010Pel[1] & 0x00FF);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], constAlpha);
}
// CUDA kernel for outputting the final ARGB output from NV12
extern "C"
__global__ void NV12ToARGB_drvapi(uint32 *srcImage, size_t nSourcePitch, int nBitDepthInBytes,
uint32 *dstImage, size_t nDestPitch,
uint32 width, uint32 height)
{
int32 x, y;
uint32 yuv101010Pel[2];
uint32 processingPitch = ((width) + 63) & ~63;
uint32 dstImagePitch = nDestPitch >> 2;
uint8 *srcImageU8 = (uint8 *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)]) << 2;
uint32 chromaOffset = processingPitch * height;
int32 y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32 chromaCb;
uint32 chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + x * nBitDepthInBytes + (nBitDepthInBytes - 1)] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32)srcImageU8[chromaOffset + y_chroma * processingPitch + (x + 1) * nBitDepthInBytes + (nBitDepthInBytes - 1)] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
    // this step performs the color conversion
uint32 yuvi[6];
float red[2], green[2], blue[2];
yuvi[0] = (yuv101010Pel[0] & COLOR_COMPONENT_MASK);
yuvi[1] = ((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[2] = ((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
yuvi[3] = (yuv101010Pel[1] & COLOR_COMPONENT_MASK);
yuvi[4] = ((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK);
yuvi[5] = ((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK);
// YUV to RGB Transformation conversion
YUV2RGB(&yuvi[0], &red[0], &green[0], &blue[0]);
YUV2RGB(&yuvi[3], &red[1], &green[1], &blue[1]);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_10bit(red[0], green[0], blue[0], constAlpha);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_10bit(red[1], green[1], blue[1], constAlpha);
}
|
dd775157be7f23a7aa8f21c27a05f885810feb25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <thrust/random.h>
#include <thrust/sort.h>
#include <iostream>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/sample_prob.h"
#include "paddle/fluid/operators/math/sampler.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
namespace math {
using Tensor = framework::Tensor;
template <typename T>
__device__ T gpu_adjust_prob(const T prob,
const int num_samples,
const int num_tries) {
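  // -expm1(n * log1p(-p)) == 1 - (1 - p)^n, i.e. the probability that a class with
  // single-draw probability p is hit at least once over num_tries independent draws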
if (num_samples == num_tries) {
return prob * num_samples;
} else {
return -expm1(num_tries * log1p(-prob));
}
}
class GPULogUniformSampler {
public:
__device__ int64_t Sample(float random,
const int range,
const float log_range) const;
__device__ float Probability(int64_t value, const float log_range) const;
};
__device__ int64_t GPULogUniformSampler::Sample(float random,
const int range,
const float log_range) const {
  // Obtain the log-uniform distribution from the uniform distribution via the
  // inverse transform sampling method
const int64_t value = static_cast<int64_t>(exp(random * log_range)) - 1;
// Mathematically, value should be <= range_, but might not be due to some
// floating point roundoff, so we mod by range_.
return value % range;
}
__device__ float GPULogUniformSampler::Probability(
int64_t value, const float log_range) const {
// Given f(x) = 1/[(x+1) * log_range_]
// The value's probability is integral of f(x) from value to (value + 1)
return (log((value + 2.0) / (value + 1.0))) / log_range;
}
template <typename T>
__global__ void SamplingCondidate(const size_t n,
const int num_tries,
const int range,
const float log_range,
const int num_true,
const std::size_t num_samples,
const int64_t* label_data,
int64_t* samples_data,
T* probabilities_data) {
const int num_sampled_classes = num_true + num_samples;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = 0;
GPULogUniformSampler sampler;
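  // grid-stride loop over batch_size * num_sampled_classes entries: each row stores its
  // num_true labels first, followed by the same shared set of sampled candidate ids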
for (; idx < n; idx += blockDim.x * gridDim.x) {
int col_idx = idx % num_sampled_classes;
int row_idx = idx / num_sampled_classes;
if (col_idx < num_true) {
samples_data[idx] = label_data[row_idx * num_true + col_idx];
} else {
samples_data[idx] = samples_data[col_idx];
}
probabilities_data[idx] = sampler.Probability(samples_data[idx], log_range);
probabilities_data[idx] =
gpu_adjust_prob(probabilities_data[idx], num_samples, num_tries);
}
}
template <typename T>
int UniqSampler(const Sampler& sampler,
const std::size_t num_samples,
int64_t* samples_data) {
  // sample num_samples unique samples for an example; note that they are not
  // all negative samples
std::unordered_set<int64_t> tmp_samples;
tmp_samples.clear();
int num_tries = 0;
int j = 0;
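  // rejection-sample until num_samples distinct ids have been collected; num_tries counts
  // every draw (including rejected duplicates) and is used later to adjust probabilities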
while (j < num_samples) {
++num_tries;
auto v = sampler.Sample();
auto insert_ok = tmp_samples.insert(v).second;
if (!insert_ok) {
continue;
}
samples_data[j] = v;
++j;
}
return num_tries;
}
template <typename T>
void GPUSampleWithProb<T>::operator()(const phi::GPUContext& context,
const int seed,
const int dict_size,
const bool uniq,
const std::size_t num_samples,
const Tensor* L,
Tensor* S,
Tensor* P) {
// UNDERSTAND: dimension issues
const auto lbl_dim = L->dims();
const int batch_size = lbl_dim[0];
const int num_true = lbl_dim[1];
const int num_sampled_classes = num_true + num_samples;
framework::DDim ret_dim{batch_size, num_sampled_classes};
// UNDERSTAND: raw data view
const int64_t* label_data = L->data<int64_t>();
int64_t* samples_data = S->data<int64_t>();
T* probabilities_data = P->data<T>();
int s_size = num_samples;
framework::DDim s_dim{s_size};
Tensor s;
int64_t* s_data = s.mutable_data<int64_t>(s_dim, platform::CPUPlace());
math::LogUniformSampler sampler(dict_size, seed);
int range = dict_size;
float log_range = log(range + 1);
int num_tries = UniqSampler<T>(sampler, num_samples, s_data);
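  // candidates are drawn once on the host and shared by all rows of the batch; only the
  // per-row true labels differ, so a single num_samples-sized buffer is copied to the device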
VLOG(1) << "num_tries: " << num_tries;
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true,
s_data,
sizeof(int64_t) * num_samples,
hipMemcpyHostToDevice));
#else
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true,
s_data,
sizeof(int64_t) * num_samples,
hipMemcpyHostToDevice));
#endif
int threads = 512;
const size_t size = batch_size * num_sampled_classes;
int grid = (batch_size * num_sampled_classes + threads - 1) / threads;
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(HIP_KERNEL_NAME(SamplingCondidate<T>),
dim3(grid),
dim3(threads),
0,
context.stream(),
size,
num_tries,
range,
log_range,
num_true,
num_samples,
label_data,
samples_data,
probabilities_data);
#else
hipLaunchKernelGGL(( SamplingCondidate<T>)
, dim3(grid), dim3(threads), 0, context.stream(), size,
num_tries,
range,
log_range,
num_true,
num_samples,
label_data,
samples_data,
probabilities_data);
#endif
}
template class GPUSampleWithProb<float>;
template class GPUSampleWithProb<double>;
} // namespace math
} // namespace operators
} // namespace paddle
| dd775157be7f23a7aa8f21c27a05f885810feb25.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <thrust/random.h>
#include <thrust/sort.h>
#include <iostream>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/math/sample_prob.h"
#include "paddle/fluid/operators/math/sampler.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
namespace math {
using Tensor = framework::Tensor;
template <typename T>
__device__ T gpu_adjust_prob(const T prob,
const int num_samples,
const int num_tries) {
if (num_samples == num_tries) {
return prob * num_samples;
} else {
return -expm1(num_tries * log1p(-prob));
}
}
class GPULogUniformSampler {
public:
__device__ int64_t Sample(float random,
const int range,
const float log_range) const;
__device__ float Probability(int64_t value, const float log_range) const;
};
__device__ int64_t GPULogUniformSampler::Sample(float random,
const int range,
const float log_range) const {
  // Obtain the log-uniform distribution from the uniform distribution via the
  // inverse transform sampling method
const int64_t value = static_cast<int64_t>(exp(random * log_range)) - 1;
// Mathematically, value should be <= range_, but might not be due to some
// floating point roundoff, so we mod by range_.
return value % range;
}
__device__ float GPULogUniformSampler::Probability(
int64_t value, const float log_range) const {
// Given f(x) = 1/[(x+1) * log_range_]
// The value's probability is integral of f(x) from value to (value + 1)
return (log((value + 2.0) / (value + 1.0))) / log_range;
}
template <typename T>
__global__ void SamplingCondidate(const size_t n,
const int num_tries,
const int range,
const float log_range,
const int num_true,
const std::size_t num_samples,
const int64_t* label_data,
int64_t* samples_data,
T* probabilities_data) {
const int num_sampled_classes = num_true + num_samples;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = 0;
GPULogUniformSampler sampler;
for (; idx < n; idx += blockDim.x * gridDim.x) {
int col_idx = idx % num_sampled_classes;
int row_idx = idx / num_sampled_classes;
if (col_idx < num_true) {
samples_data[idx] = label_data[row_idx * num_true + col_idx];
} else {
samples_data[idx] = samples_data[col_idx];
}
probabilities_data[idx] = sampler.Probability(samples_data[idx], log_range);
probabilities_data[idx] =
gpu_adjust_prob(probabilities_data[idx], num_samples, num_tries);
}
}
template <typename T>
int UniqSampler(const Sampler& sampler,
const std::size_t num_samples,
int64_t* samples_data) {
  // sample num_samples unique samples for an example; note that they are not
  // all negative samples
std::unordered_set<int64_t> tmp_samples;
tmp_samples.clear();
int num_tries = 0;
int j = 0;
while (j < num_samples) {
++num_tries;
auto v = sampler.Sample();
auto insert_ok = tmp_samples.insert(v).second;
if (!insert_ok) {
continue;
}
samples_data[j] = v;
++j;
}
return num_tries;
}
template <typename T>
void GPUSampleWithProb<T>::operator()(const phi::GPUContext& context,
const int seed,
const int dict_size,
const bool uniq,
const std::size_t num_samples,
const Tensor* L,
Tensor* S,
Tensor* P) {
// UNDERSTAND: dimension issues
const auto lbl_dim = L->dims();
const int batch_size = lbl_dim[0];
const int num_true = lbl_dim[1];
const int num_sampled_classes = num_true + num_samples;
framework::DDim ret_dim{batch_size, num_sampled_classes};
// UNDERSTAND: raw data view
const int64_t* label_data = L->data<int64_t>();
int64_t* samples_data = S->data<int64_t>();
T* probabilities_data = P->data<T>();
int s_size = num_samples;
framework::DDim s_dim{s_size};
Tensor s;
int64_t* s_data = s.mutable_data<int64_t>(s_dim, platform::CPUPlace());
math::LogUniformSampler sampler(dict_size, seed);
int range = dict_size;
float log_range = log(range + 1);
int num_tries = UniqSampler<T>(sampler, num_samples, s_data);
VLOG(1) << "num_tries: " << num_tries;
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpy(samples_data + num_true,
s_data,
sizeof(int64_t) * num_samples,
hipMemcpyHostToDevice));
#else
PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpy(samples_data + num_true,
s_data,
sizeof(int64_t) * num_samples,
cudaMemcpyHostToDevice));
#endif
int threads = 512;
const size_t size = batch_size * num_sampled_classes;
int grid = (batch_size * num_sampled_classes + threads - 1) / threads;
#ifdef PADDLE_WITH_HIP
hipLaunchKernelGGL(HIP_KERNEL_NAME(SamplingCondidate<T>),
dim3(grid),
dim3(threads),
0,
context.stream(),
size,
num_tries,
range,
log_range,
num_true,
num_samples,
label_data,
samples_data,
probabilities_data);
#else
SamplingCondidate<T>
<<<grid, threads, 0, context.stream()>>>(size,
num_tries,
range,
log_range,
num_true,
num_samples,
label_data,
samples_data,
probabilities_data);
#endif
}
template class GPUSampleWithProb<float>;
template class GPUSampleWithProb<double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
b1d47680be0666230d8b983ca382e9144948f15e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/lrn.h>
#include <Status.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void lrnKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
T* shared = reinterpret_cast<T*>(sharedChar);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const T tbias = static_cast<T>(bias);
const T tbeta = static_cast<T>(beta);
const T talpha = static_cast<T>(alpha);
// one block of threads processes 1 example within batch
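        // per element: z[i] = x[i] / (bias + alpha * sum_{j in window} x[j]^2)^beta, where the
        // window spans `depth` elements on each side of i, clamped to the tad bounds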
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<T*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<T*>(vz) + zTadOffsets[i];
// load everything into shared memory, so we'll operate on shared memory from now on
shared[threadIdx.x] = x[threadIdx.x * xEws];
__syncthreads();
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
T prev = 0.;
for (int s = begin; s < end; s++)
prev = prev + shared[s] * shared[s];
z[threadIdx.x * zEws] = shared[threadIdx.x] / nd4j::math::nd4j_pow<T, T, T>(tbias + alpha * prev, tbeta);
}
}
template <typename X, typename Z>
static _CUDA_G void lrnBPKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
X* sharedX = reinterpret_cast<X*>(sharedChar);
Z* sharedY = reinterpret_cast<Z*>(sharedX + blockDim.x);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const Z tbias = static_cast<Z>(bias);
const Z tbeta = static_cast<Z>(beta);
const Z talpha = static_cast<Z>(alpha);
const Z coeff = talpha * tbeta;
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<Z*>(vz) + zTadOffsets[i];
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
// load everything into shared memory
sharedX[threadIdx.x] = x[threadIdx.x * xEws];
sharedY[threadIdx.x] = 0.f;
__syncthreads();
// we're operating in shared memory
for (int s = begin; s < end; s++)
sharedY[threadIdx.x] = sharedY[threadIdx.x] + sharedX[s] * sharedX[s];
__syncthreads();
Z factor[1024];
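            // per-thread scratch for the window's normalization factors; tadLength (== blockDim.x)
            // is capped at 1024 by the host-side check, so this local array is always large enough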
Z init = tbias + talpha * sharedY[threadIdx.x];
Z prev = 0.f;
for (uint s = begin; s < end; ++s) {
factor[s] = nd4j::math::nd4j_pow<Z, Z, Z>(tbias + talpha * sharedY[s], -tbeta - 1);
prev = prev + sharedX[s] * factor[s];
}
z[threadIdx.x * zEws] = factor[threadIdx.x] * init - 2 * sharedX[threadIdx.x] * coeff * prev;
}
}
template <typename X, typename Z>
static void lrnBP_(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
auto rank = input.rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
hipLaunchKernelGGL(( lrnBPKernel<X, Z>), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(X) + numThreads * sizeof(Z) + 1024, *block.launchContext()->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradI.specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
gradI.tickWriteDevice();
gradI *= gradO;
}
void lrnBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
input.syncToDevice();
gradO.syncToDevice();
BUILD_DOUBLE_SELECTOR(input.dataType(), gradO.dataType(), lrnBP_, (block, input, gradO, gradI, depth, bias, alpha, beta), FLOAT_TYPES, FLOAT_TYPES);
gradI.tickWriteDevice();
}
template <typename T>
static void lrnFunctor_(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
auto rank = input->rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
hipLaunchKernelGGL(( lrnKernel<T>), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(T), *block.launchContext()->getCudaStream(), input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
}
int lrnFunctor(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
input->syncToDevice();
BUILD_SINGLE_SELECTOR(input->dataType(), lrnFunctor_, (block, input, output, depth, bias, alpha, beta), FLOAT_TYPES);
output->tickWriteDevice();
return Status::OK();
}
}
}
}
| b1d47680be0666230d8b983ca382e9144948f15e.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/lrn.h>
#include <Status.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void lrnKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
T* shared = reinterpret_cast<T*>(sharedChar);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const T tbias = static_cast<T>(bias);
const T tbeta = static_cast<T>(beta);
const T talpha = static_cast<T>(alpha);
// one block of threads processes 1 example within batch
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<T*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<T*>(vz) + zTadOffsets[i];
// load everything into shared memory, so we'll operate on shared memory from now on
shared[threadIdx.x] = x[threadIdx.x * xEws];
__syncthreads();
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
T prev = 0.;
for (int s = begin; s < end; s++)
prev = prev + shared[s] * shared[s];
z[threadIdx.x * zEws] = shared[threadIdx.x] / nd4j::math::nd4j_pow<T, T, T>(tbias + alpha * prev, tbeta);
}
}
template <typename X, typename Z>
static _CUDA_G void lrnBPKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
X* sharedX = reinterpret_cast<X*>(sharedChar);
Z* sharedY = reinterpret_cast<Z*>(sharedX + blockDim.x);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const Z tbias = static_cast<Z>(bias);
const Z tbeta = static_cast<Z>(beta);
const Z talpha = static_cast<Z>(alpha);
const Z coeff = talpha * tbeta;
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<Z*>(vz) + zTadOffsets[i];
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
// load everything into shared memory
sharedX[threadIdx.x] = x[threadIdx.x * xEws];
sharedY[threadIdx.x] = 0.f;
__syncthreads();
// we're operating in shared memory
for (int s = begin; s < end; s++)
sharedY[threadIdx.x] = sharedY[threadIdx.x] + sharedX[s] * sharedX[s];
__syncthreads();
Z factor[1024];
Z init = tbias + talpha * sharedY[threadIdx.x];
Z prev = 0.f;
for (uint s = begin; s < end; ++s) {
factor[s] = nd4j::math::nd4j_pow<Z, Z, Z>(tbias + talpha * sharedY[s], -tbeta - 1);
prev = prev + sharedX[s] * factor[s];
}
z[threadIdx.x * zEws] = factor[threadIdx.x] * init - 2 * sharedX[threadIdx.x] * coeff * prev;
}
}
template <typename X, typename Z>
static void lrnBP_(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
auto rank = input.rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
lrnBPKernel<X, Z><<<numBlocks, numThreads, numThreads * sizeof(X) + numThreads * sizeof(Z) + 1024, *block.launchContext()->getCudaStream()>>>(input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradI.specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
gradI.tickWriteDevice();
gradI *= gradO;
}
void lrnBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
input.syncToDevice();
gradO.syncToDevice();
BUILD_DOUBLE_SELECTOR(input.dataType(), gradO.dataType(), lrnBP_, (block, input, gradO, gradI, depth, bias, alpha, beta), FLOAT_TYPES, FLOAT_TYPES);
gradI.tickWriteDevice();
}
template <typename T>
static void lrnFunctor_(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
auto rank = input->rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
lrnKernel<T><<<numBlocks, numThreads, numThreads * sizeof(T), *block.launchContext()->getCudaStream()>>>(input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
}
int lrnFunctor(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
input->syncToDevice();
BUILD_SINGLE_SELECTOR(input->dataType(), lrnFunctor_, (block, input, output, depth, bias, alpha, beta), FLOAT_TYPES);
output->tickWriteDevice();
return Status::OK();
}
}
}
}
|
67f9c4809daed9f0e7720790536889d627fc5f8d.hip | // !!! This is a file automatically generated by hipify!!!
// mining.cu
/*******************************************************************************
MINING -- Autolykos parallel BlockMining procedure
*******************************************************************************/
#include "../include/mining.h"
#include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////////////
// Unfinalized hash of message
////////////////////////////////////////////////////////////////////////////////
void InitMining(
// context
ctx_t * ctx,
// message
const uint32_t * mes,
// message length in bytes
const uint32_t meslen
)
{
uint64_t aux[32];
//========================================================================//
// Initialize context
//========================================================================//
memset(ctx->b, 0, BUF_SIZE_8);
B2B_IV(ctx->h);
ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8;
memset(ctx->t, 0, 16);
ctx->c = 0;
//========================================================================//
// Hash message
//========================================================================//
for (uint_t j = 0; j < meslen; ++j)
{
if (ctx->c == BUF_SIZE_8) { HOST_B2B_H(ctx, aux); }
ctx->b[ctx->c++] = ((const uint8_t *)mes)[j];
}
return;
}
__global__ void BlakeHash(const uint32_t* data, const uint64_t base, uint32_t* BHashes)
{
uint32_t tid = threadIdx.x;
// shared memory
/*
__shared__ uint32_t sdata[ROUND_NC_SIZE_32];
#pragma unroll
for (int i = 0; i < NC_SIZE_32_BLOCK; ++i)
{
sdata[NC_SIZE_32_BLOCK * tid + i]
= data[
NC_SIZE_32_BLOCK * tid + NUM_SIZE_32 * 2
+ COUPLED_PK_SIZE_32 + i
];
}
*/
// __syncthreads();
// NUM_SIZE_8 bytes
// local memory
// 472 bytes
__shared__ uint32_t sdata[BLOCK_DIM*64];
__shared__ ctx_t ctxdata[BLOCK_DIM];
uint32_t *ldata = sdata + tid*64;
// uint32_t ldata[118];
// 256 bytes
uint64_t * aux = (uint64_t *)ldata;
// (4 * K_LEN) bytes
uint32_t * ind = ldata;
// (NUM_SIZE_8 + 4) bytes
uint32_t * r = ind + K_LEN;
// (212 + 4) bytes
ctx_t * ctx = ctxdata + tid;
// *ctx = *((ctx_t *)(sdata + NUM_SIZE_32));
memcpy(ctx,data + NUM_SIZE_32*3 + COUPLED_PK_SIZE_32, sizeof(ctx_t));
tid = threadIdx.x + blockDim.x * blockIdx.x;
uint32_t j;
__shared__ uint32_t nonces[NONCE_SIZE_32*BLOCK_DIM];
uint32_t* non = nonces + NONCE_SIZE_32*threadIdx.x;
// uint32_t non[NONCE_SIZE_32];
asm volatile (
"add.cc.u32 %0, %1, %2;":
"=r"(non[0]): "r"(((uint32_t *)&base)[0]), "r"(tid)
);
asm volatile (
"addc.u32 %0, %1, 0;": "=r"(non[1]): "r"(((uint32_t *)&base)[1])
);
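    // the nonce assigned to this thread is base + global thread id, formed as a 64-bit
    // value from a 32-bit add with carry across the two halves above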
//================================================================//
// Hash nonce
//================================================================//
#pragma unroll
for (j = 0; ctx->c < BUF_SIZE_8 && j < NONCE_SIZE_8; ++j)
{
ctx->b[ctx->c++] = ((uint8_t *)non)[NONCE_SIZE_8 - j - 1];
}
#pragma unroll
for ( ; j < NONCE_SIZE_8; )
{
DEVICE_B2B_H(ctx, aux);
#pragma unroll
for ( ; ctx->c < BUF_SIZE_8 && j < NONCE_SIZE_8; ++j)
{
ctx->b[ctx->c++] = ((uint8_t *)non)[NONCE_SIZE_8 - j - 1];
}
}
//================================================================//
// Finalize hashes
//================================================================//
DEVICE_B2B_H_LAST(ctx, aux);
#pragma unroll
for (j = 0; j < NUM_SIZE_8; ++j)
{
((uint8_t *) (BHashes + NUM_SIZE_32*tid ) )[(j & 0xFFFFFFFC) + (3 - (j & 3))]
= (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF;
}
}
////////////////////////////////////////////////////////////////////////////////
// Block mining
////////////////////////////////////////////////////////////////////////////////
__global__ void BlockMining(
// boundary for puzzle
const uint32_t * bound,
// data: pk || mes || w || padding || x || sk || ctx
const uint32_t * data,
// nonce base
const uint64_t base,
// precalculated hashes
const uint32_t * __restrict__ hashes,
// results
uint32_t * res,
// indices of valid solutions
uint32_t * valid,
uint32_t * count,
uint32_t *BHashes
)
{
uint32_t tid = threadIdx.x;
// NUM_SIZE_8 bytes
__shared__ uint32_t sk[NUM_SIZE_32];
uint32_t ldata[42];
uint32_t * ind = ldata;
// (NUM_SIZE_8 + 4) bytes
uint32_t * r = ind + K_LEN;
// *ctx = *((ctx_t *)(sdata + NUM_SIZE_32));
memcpy(sk, data + NUM_SIZE_32*2 + COUPLED_PK_SIZE_32, NUM_SIZE_32*sizeof(uint32_t));
tid = threadIdx.x + blockDim.x * blockIdx.x;
memcpy(r, BHashes + tid*NUM_SIZE_32, NUM_SIZE_32*sizeof(uint32_t));
uint32_t j;
//================================================================//
// Generate indices
//================================================================//
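    // K_LEN indices are carved out of the hash by sliding a 32-bit window one byte at a
    // time and masking with N_MASK, so each index selects one of the N precomputed hashes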
#pragma unroll
for (int i = 1; i < INDEX_SIZE_8; ++i)
{
((uint8_t *)r)[NUM_SIZE_8 + i] = ((uint8_t *)r)[i];
}
#pragma unroll
for (int k = 0; k < K_LEN; k += INDEX_SIZE_8)
{
ind[k] = r[k >> 2] & N_MASK;
#pragma unroll
for (int i = 1; i < INDEX_SIZE_8; ++i)
{
ind[k + i]
= (
(r[k >> 2] << (i << 3))
| (r[(k >> 2) + 1] >> (32 - (i << 3)))
) & N_MASK;
}
}
//================================================================//
// Calculate result
//================================================================//
// first addition of hashes -> r
asm volatile (
"add.cc.u32 %0, %1, %2;":
"=r"(r[0]): "r"(hashes[ind[0] << 3]), "r"(hashes[ind[1] << 3])
);
#pragma unroll
for (int i = 1; i < 8; ++i)
{
asm volatile (
"addc.cc.u32 %0, %1, %2;":
"=r"(r[i]):
"r"(hashes[(ind[0] << 3) + i]),
"r"(hashes[(ind[1] << 3) + i])
);
}
asm volatile ("addc.u32 %0, 0, 0;": "=r"(r[8]));
// remaining additions
#pragma unroll
for (int k = 2; k < K_LEN; ++k)
{
asm volatile (
"add.cc.u32 %0, %0, %1;":
"+r"(r[0]): "r"(hashes[ind[k] << 3])
);
#pragma unroll
for (int i = 1; i < 8; ++i)
{
asm volatile (
"addc.cc.u32 %0, %0, %1;":
"+r"(r[i]): "r"(hashes[(ind[k] << 3) + i])
);
}
asm volatile ("addc.u32 %0, %0, 0;": "+r"(r[8]));
}
// subtraction of secret key
asm volatile ("sub.cc.u32 %0, %0, %1;": "+r"(r[0]): "r"(sk[0]));
#pragma unroll
for (int i = 1; i < 8; ++i)
{
asm volatile (
"subc.cc.u32 %0, %0, %1;": "+r"(r[i]): "r"(sk[i])
);
}
asm volatile ("subc.u32 %0, %0, 0;": "+r"(r[8]));
//================================================================//
// Result mod Q
//================================================================//
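    // fold the overflow limb r[8] back into the 256-bit value using multiples of Q's low
    // limbs, then apply a final conditional correction so the result lands in [0, Q)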
// 20 bytes
uint32_t * med = ind;
// 4 bytes
uint32_t * d = ind + 5;
uint32_t * carry = d;
d[0] = r[8];
//================================================================//
asm volatile (
"mul.lo.u32 %0, %1, " q0_s ";": "=r"(med[0]): "r"(*d)
);
asm volatile (
"mul.hi.u32 %0, %1, " q0_s ";": "=r"(med[1]): "r"(*d)
);
asm volatile (
"mul.lo.u32 %0, %1, " q2_s ";": "=r"(med[2]): "r"(*d)
);
asm volatile (
"mul.hi.u32 %0, %1, " q2_s ";": "=r"(med[3]): "r"(*d)
);
asm volatile (
"mad.lo.cc.u32 %0, %1, " q1_s ", %0;": "+r"(med[1]): "r"(*d)
);
asm volatile (
"madc.hi.cc.u32 %0, %1, " q1_s ", %0;": "+r"(med[2]): "r"(*d)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q3_s ", %0;": "+r"(med[3]): "r"(*d)
);
asm volatile (
"madc.hi.u32 %0, %1, " q3_s ", 0;": "=r"(med[4]): "r"(*d)
);
//================================================================//
asm volatile ("sub.cc.u32 %0, %0, %1;": "+r"(r[0]): "r"(med[0]));
#pragma unroll
for (int i = 1; i < 5; ++i)
{
asm volatile (
"subc.cc.u32 %0, %0, %1;": "+r"(r[i]): "r"(med[i])
);
}
#pragma unroll
for (int i = 5; i < 7; ++i)
{
asm volatile ("subc.cc.u32 %0, %0, 0;": "+r"(r[i]));
}
asm volatile ("subc.u32 %0, %0, 0;": "+r"(r[7]));
//================================================================//
d[1] = d[0] >> 31;
d[0] <<= 1;
asm volatile ("add.cc.u32 %0, %0, %1;": "+r"(r[4]): "r"(d[0]));
asm volatile ("addc.cc.u32 %0, %0, %1;": "+r"(r[5]): "r"(d[1]));
asm volatile ("addc.cc.u32 %0, %0, 0;": "+r"(r[6]));
asm volatile ("addc.u32 %0, %0, 0;": "+r"(r[7]));
//================================================================//
asm volatile ("sub.cc.u32 %0, %0, " q0_s ";": "+r"(r[0]));
asm volatile ("subc.cc.u32 %0, %0, " q1_s ";": "+r"(r[1]));
asm volatile ("subc.cc.u32 %0, %0, " q2_s ";": "+r"(r[2]));
asm volatile ("subc.cc.u32 %0, %0, " q3_s ";": "+r"(r[3]));
asm volatile ("subc.cc.u32 %0, %0, " q4_s ";": "+r"(r[4]));
#pragma unroll
for (int i = 5; i < 8; ++i)
{
asm volatile ("subc.cc.u32 %0, %0, " qhi_s ";": "+r"(r[i]));
}
asm volatile ("subc.u32 %0, 0, 0;": "=r"(*carry));
*carry = 0 - *carry;
//================================================================//
asm volatile (
"mad.lo.cc.u32 %0, %1, " q0_s ", %0;": "+r"(r[0]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q1_s ", %0;": "+r"(r[1]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q2_s ", %0;": "+r"(r[2]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q3_s ", %0;": "+r"(r[3]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q4_s ", %0;": "+r"(r[4]): "r"(*carry)
);
#pragma unroll
for (int i = 5; i < 7; ++i)
{
asm volatile (
"madc.lo.cc.u32 %0, %1, " qhi_s ", %0;":
"+r"(r[i]): "r"(*carry)
);
}
asm volatile (
"madc.lo.u32 %0, %1, " qhi_s ", %0;": "+r"(r[7]): "r"(*carry)
);
//================================================================//
// Dump result to global memory -- LITTLE ENDIAN
//================================================================//
j = ((uint64_t *)r)[3] < ((uint64_t *)bound)[3]
|| ((uint64_t *)r)[3] == ((uint64_t *)bound)[3] && (
((uint64_t *)r)[2] < ((uint64_t *)bound)[2]
|| ((uint64_t *)r)[2] == ((uint64_t *)bound)[2] && (
((uint64_t *)r)[1] < ((uint64_t *)bound)[1]
|| ((uint64_t *)r)[1] == ((uint64_t *)bound)[1]
&& ((uint64_t *)r)[0] < ((uint64_t *)bound)[0]
)
);
if(j)
{
uint32_t id = atomicInc(count, MAX_SOLS);
valid[id] = tid+1;
#pragma unroll
for (int i = 0; i < NUM_SIZE_32; ++i)
{
res[i + id*NUM_SIZE_32] = r[i];
}
}
}
// mining.cu
| 67f9c4809daed9f0e7720790536889d627fc5f8d.cu | // mining.cu
/*******************************************************************************
MINING -- Autolykos parallel BlockMining procedure
*******************************************************************************/
#include "../include/mining.h"
#include <cuda.h>
////////////////////////////////////////////////////////////////////////////////
// Unfinalized hash of message
////////////////////////////////////////////////////////////////////////////////
void InitMining(
// context
ctx_t * ctx,
// message
const uint32_t * mes,
// message length in bytes
const uint32_t meslen
)
{
uint64_t aux[32];
//========================================================================//
// Initialize context
//========================================================================//
memset(ctx->b, 0, BUF_SIZE_8);
B2B_IV(ctx->h);
ctx->h[0] ^= 0x01010000 ^ NUM_SIZE_8;
memset(ctx->t, 0, 16);
ctx->c = 0;
//========================================================================//
// Hash message
//========================================================================//
for (uint_t j = 0; j < meslen; ++j)
{
if (ctx->c == BUF_SIZE_8) { HOST_B2B_H(ctx, aux); }
ctx->b[ctx->c++] = ((const uint8_t *)mes)[j];
}
return;
}
__global__ void BlakeHash(const uint32_t* data, const uint64_t base, uint32_t* BHashes)
{
uint32_t tid = threadIdx.x;
// shared memory
/*
__shared__ uint32_t sdata[ROUND_NC_SIZE_32];
#pragma unroll
for (int i = 0; i < NC_SIZE_32_BLOCK; ++i)
{
sdata[NC_SIZE_32_BLOCK * tid + i]
= data[
NC_SIZE_32_BLOCK * tid + NUM_SIZE_32 * 2
+ COUPLED_PK_SIZE_32 + i
];
}
*/
// __syncthreads();
// NUM_SIZE_8 bytes
// local memory
// 472 bytes
__shared__ uint32_t sdata[BLOCK_DIM*64];
__shared__ ctx_t ctxdata[BLOCK_DIM];
uint32_t *ldata = sdata + tid*64;
// uint32_t ldata[118];
// 256 bytes
uint64_t * aux = (uint64_t *)ldata;
// (4 * K_LEN) bytes
uint32_t * ind = ldata;
// (NUM_SIZE_8 + 4) bytes
uint32_t * r = ind + K_LEN;
// (212 + 4) bytes
ctx_t * ctx = ctxdata + tid;
// *ctx = *((ctx_t *)(sdata + NUM_SIZE_32));
memcpy(ctx,data + NUM_SIZE_32*3 + COUPLED_PK_SIZE_32, sizeof(ctx_t));
tid = threadIdx.x + blockDim.x * blockIdx.x;
uint32_t j;
__shared__ uint32_t nonces[NONCE_SIZE_32*BLOCK_DIM];
uint32_t* non = nonces + NONCE_SIZE_32*threadIdx.x;
// uint32_t non[NONCE_SIZE_32];
asm volatile (
"add.cc.u32 %0, %1, %2;":
"=r"(non[0]): "r"(((uint32_t *)&base)[0]), "r"(tid)
);
asm volatile (
"addc.u32 %0, %1, 0;": "=r"(non[1]): "r"(((uint32_t *)&base)[1])
);
//================================================================//
// Hash nonce
//================================================================//
#pragma unroll
for (j = 0; ctx->c < BUF_SIZE_8 && j < NONCE_SIZE_8; ++j)
{
ctx->b[ctx->c++] = ((uint8_t *)non)[NONCE_SIZE_8 - j - 1];
}
#pragma unroll
for ( ; j < NONCE_SIZE_8; )
{
DEVICE_B2B_H(ctx, aux);
#pragma unroll
for ( ; ctx->c < BUF_SIZE_8 && j < NONCE_SIZE_8; ++j)
{
ctx->b[ctx->c++] = ((uint8_t *)non)[NONCE_SIZE_8 - j - 1];
}
}
//================================================================//
// Finalize hashes
//================================================================//
DEVICE_B2B_H_LAST(ctx, aux);
#pragma unroll
for (j = 0; j < NUM_SIZE_8; ++j)
{
((uint8_t *) (BHashes + NUM_SIZE_32*tid ) )[(j & 0xFFFFFFFC) + (3 - (j & 3))]
= (ctx->h[j >> 3] >> ((j & 7) << 3)) & 0xFF;
}
}
////////////////////////////////////////////////////////////////////////////////
// Block mining
////////////////////////////////////////////////////////////////////////////////
__global__ void BlockMining(
// boundary for puzzle
const uint32_t * bound,
// data: pk || mes || w || padding || x || sk || ctx
const uint32_t * data,
// nonce base
const uint64_t base,
// precalculated hashes
const uint32_t * __restrict__ hashes,
// results
uint32_t * res,
// indices of valid solutions
uint32_t * valid,
uint32_t * count,
uint32_t *BHashes
)
{
uint32_t tid = threadIdx.x;
// NUM_SIZE_8 bytes
__shared__ uint32_t sk[NUM_SIZE_32];
uint32_t ldata[42];
uint32_t * ind = ldata;
// (NUM_SIZE_8 + 4) bytes
uint32_t * r = ind + K_LEN;
// *ctx = *((ctx_t *)(sdata + NUM_SIZE_32));
memcpy(sk, data + NUM_SIZE_32*2 + COUPLED_PK_SIZE_32, NUM_SIZE_32*sizeof(uint32_t));
tid = threadIdx.x + blockDim.x * blockIdx.x;
memcpy(r, BHashes + tid*NUM_SIZE_32, NUM_SIZE_32*sizeof(uint32_t));
uint32_t j;
//================================================================//
// Generate indices
//================================================================//
#pragma unroll
for (int i = 1; i < INDEX_SIZE_8; ++i)
{
((uint8_t *)r)[NUM_SIZE_8 + i] = ((uint8_t *)r)[i];
}
#pragma unroll
for (int k = 0; k < K_LEN; k += INDEX_SIZE_8)
{
ind[k] = r[k >> 2] & N_MASK;
#pragma unroll
for (int i = 1; i < INDEX_SIZE_8; ++i)
{
ind[k + i]
= (
(r[k >> 2] << (i << 3))
| (r[(k >> 2) + 1] >> (32 - (i << 3)))
) & N_MASK;
}
}
//================================================================//
// Calculate result
//================================================================//
// first addition of hashes -> r
asm volatile (
"add.cc.u32 %0, %1, %2;":
"=r"(r[0]): "r"(hashes[ind[0] << 3]), "r"(hashes[ind[1] << 3])
);
#pragma unroll
for (int i = 1; i < 8; ++i)
{
asm volatile (
"addc.cc.u32 %0, %1, %2;":
"=r"(r[i]):
"r"(hashes[(ind[0] << 3) + i]),
"r"(hashes[(ind[1] << 3) + i])
);
}
asm volatile ("addc.u32 %0, 0, 0;": "=r"(r[8]));
// remaining additions
#pragma unroll
for (int k = 2; k < K_LEN; ++k)
{
asm volatile (
"add.cc.u32 %0, %0, %1;":
"+r"(r[0]): "r"(hashes[ind[k] << 3])
);
#pragma unroll
for (int i = 1; i < 8; ++i)
{
asm volatile (
"addc.cc.u32 %0, %0, %1;":
"+r"(r[i]): "r"(hashes[(ind[k] << 3) + i])
);
}
asm volatile ("addc.u32 %0, %0, 0;": "+r"(r[8]));
}
// subtraction of secret key
asm volatile ("sub.cc.u32 %0, %0, %1;": "+r"(r[0]): "r"(sk[0]));
#pragma unroll
for (int i = 1; i < 8; ++i)
{
asm volatile (
"subc.cc.u32 %0, %0, %1;": "+r"(r[i]): "r"(sk[i])
);
}
asm volatile ("subc.u32 %0, %0, 0;": "+r"(r[8]));
//================================================================//
// Result mod Q
//================================================================//
// 20 bytes
uint32_t * med = ind;
// 4 bytes
uint32_t * d = ind + 5;
uint32_t * carry = d;
d[0] = r[8];
//================================================================//
asm volatile (
"mul.lo.u32 %0, %1, " q0_s ";": "=r"(med[0]): "r"(*d)
);
asm volatile (
"mul.hi.u32 %0, %1, " q0_s ";": "=r"(med[1]): "r"(*d)
);
asm volatile (
"mul.lo.u32 %0, %1, " q2_s ";": "=r"(med[2]): "r"(*d)
);
asm volatile (
"mul.hi.u32 %0, %1, " q2_s ";": "=r"(med[3]): "r"(*d)
);
asm volatile (
"mad.lo.cc.u32 %0, %1, " q1_s ", %0;": "+r"(med[1]): "r"(*d)
);
asm volatile (
"madc.hi.cc.u32 %0, %1, " q1_s ", %0;": "+r"(med[2]): "r"(*d)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q3_s ", %0;": "+r"(med[3]): "r"(*d)
);
asm volatile (
"madc.hi.u32 %0, %1, " q3_s ", 0;": "=r"(med[4]): "r"(*d)
);
//================================================================//
asm volatile ("sub.cc.u32 %0, %0, %1;": "+r"(r[0]): "r"(med[0]));
#pragma unroll
for (int i = 1; i < 5; ++i)
{
asm volatile (
"subc.cc.u32 %0, %0, %1;": "+r"(r[i]): "r"(med[i])
);
}
#pragma unroll
for (int i = 5; i < 7; ++i)
{
asm volatile ("subc.cc.u32 %0, %0, 0;": "+r"(r[i]));
}
asm volatile ("subc.u32 %0, %0, 0;": "+r"(r[7]));
//================================================================//
d[1] = d[0] >> 31;
d[0] <<= 1;
asm volatile ("add.cc.u32 %0, %0, %1;": "+r"(r[4]): "r"(d[0]));
asm volatile ("addc.cc.u32 %0, %0, %1;": "+r"(r[5]): "r"(d[1]));
asm volatile ("addc.cc.u32 %0, %0, 0;": "+r"(r[6]));
asm volatile ("addc.u32 %0, %0, 0;": "+r"(r[7]));
//================================================================//
asm volatile ("sub.cc.u32 %0, %0, " q0_s ";": "+r"(r[0]));
asm volatile ("subc.cc.u32 %0, %0, " q1_s ";": "+r"(r[1]));
asm volatile ("subc.cc.u32 %0, %0, " q2_s ";": "+r"(r[2]));
asm volatile ("subc.cc.u32 %0, %0, " q3_s ";": "+r"(r[3]));
asm volatile ("subc.cc.u32 %0, %0, " q4_s ";": "+r"(r[4]));
#pragma unroll
for (int i = 5; i < 8; ++i)
{
asm volatile ("subc.cc.u32 %0, %0, " qhi_s ";": "+r"(r[i]));
}
asm volatile ("subc.u32 %0, 0, 0;": "=r"(*carry));
*carry = 0 - *carry;
//================================================================//
asm volatile (
"mad.lo.cc.u32 %0, %1, " q0_s ", %0;": "+r"(r[0]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q1_s ", %0;": "+r"(r[1]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q2_s ", %0;": "+r"(r[2]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q3_s ", %0;": "+r"(r[3]): "r"(*carry)
);
asm volatile (
"madc.lo.cc.u32 %0, %1, " q4_s ", %0;": "+r"(r[4]): "r"(*carry)
);
#pragma unroll
for (int i = 5; i < 7; ++i)
{
asm volatile (
"madc.lo.cc.u32 %0, %1, " qhi_s ", %0;":
"+r"(r[i]): "r"(*carry)
);
}
asm volatile (
"madc.lo.u32 %0, %1, " qhi_s ", %0;": "+r"(r[7]): "r"(*carry)
);
//================================================================//
// Dump result to global memory -- LITTLE ENDIAN
//================================================================//
j = ((uint64_t *)r)[3] < ((uint64_t *)bound)[3]
|| ((uint64_t *)r)[3] == ((uint64_t *)bound)[3] && (
((uint64_t *)r)[2] < ((uint64_t *)bound)[2]
|| ((uint64_t *)r)[2] == ((uint64_t *)bound)[2] && (
((uint64_t *)r)[1] < ((uint64_t *)bound)[1]
|| ((uint64_t *)r)[1] == ((uint64_t *)bound)[1]
&& ((uint64_t *)r)[0] < ((uint64_t *)bound)[0]
)
);
if(j)
{
uint32_t id = atomicInc(count, MAX_SOLS);
valid[id] = tid+1;
#pragma unroll
for (int i = 0; i < NUM_SIZE_32; ++i)
{
res[i + id*NUM_SIZE_32] = r[i];
}
}
}
// mining.cu
|
c5935350f9d717a0b6e0667bf901bb6a61e60de0.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2023, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include <memory>
#include <random>
#include <hip/hip_runtime.h>
#include <gtest/gtest.h>
#include <ginkgo/core/base/exception.hpp>
#include <ginkgo/core/base/executor.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include <ginkgo/core/solver/triangular.hpp>
#include "core/solver/lower_trs_kernels.hpp"
#include "cuda/test/utils.hpp"
namespace {
class LowerTrs : public CudaTestFixture {
protected:
using CsrMtx = gko::matrix::Csr<double, gko::int32>;
using Mtx = gko::matrix::Dense<>;
LowerTrs() : rand_engine(30) {}
std::unique_ptr<Mtx> gen_mtx(int num_rows, int num_cols)
{
return gko::test::generate_random_matrix<Mtx>(
num_rows, num_cols,
std::uniform_int_distribution<>(num_cols, num_cols),
std::normal_distribution<>(-1.0, 1.0), rand_engine, ref);
}
std::unique_ptr<Mtx> gen_l_mtx(int size)
{
return gko::test::generate_random_lower_triangular_matrix<Mtx>(
size, false, std::uniform_int_distribution<>(size),
std::normal_distribution<>(-1.0, 1.0), rand_engine, ref);
}
void initialize_data(int m, int n)
{
mtx = gen_l_mtx(m);
b = gen_mtx(m, n);
x = gen_mtx(m, n);
csr_mtx = CsrMtx::create(ref);
mtx->convert_to(csr_mtx);
d_csr_mtx = CsrMtx::create(exec);
d_x = gko::clone(exec, x);
d_csr_mtx->copy_from(csr_mtx);
b2 = Mtx::create(ref);
d_b2 = gko::clone(exec, b);
b2->copy_from(b);
}
std::shared_ptr<Mtx> b;
std::shared_ptr<Mtx> b2;
std::shared_ptr<Mtx> x;
std::shared_ptr<Mtx> mtx;
std::shared_ptr<CsrMtx> csr_mtx;
std::shared_ptr<Mtx> d_b;
std::shared_ptr<Mtx> d_b2;
std::shared_ptr<Mtx> d_x;
std::shared_ptr<CsrMtx> d_csr_mtx;
std::default_random_engine rand_engine;
};
TEST_F(LowerTrs, CudaLowerTrsFlagCheckIsCorrect)
{
bool trans_flag = true;
bool expected_flag = false;
gko::kernels::cuda::lower_trs::should_perform_transpose(exec, trans_flag);
ASSERT_EQ(expected_flag, trans_flag);
}
TEST_F(LowerTrs, CudaSingleRhsApplySyncfreeIsEquivalentToRef)
{
initialize_data(50, 1);
auto lower_trs_factory = gko::solver::LowerTrs<>::build().on(ref);
auto d_lower_trs_factory =
gko::solver::LowerTrs<>::build()
.with_algorithm(gko::solver::trisolve_algorithm::syncfree)
.on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
solver->apply(b2, x);
d_solver->apply(d_b2, d_x);
GKO_ASSERT_MTX_NEAR(d_x, x, 1e-14);
}
TEST_F(LowerTrs, CudaSingleRhsApplyIsEquivalentToRef)
{
initialize_data(50, 1);
auto lower_trs_factory = gko::solver::LowerTrs<>::build().on(ref);
auto d_lower_trs_factory = gko::solver::LowerTrs<>::build().on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
solver->apply(b2, x);
d_solver->apply(d_b2, d_x);
GKO_ASSERT_MTX_NEAR(d_x, x, 1e-14);
}
TEST_F(LowerTrs, CudaMultipleRhsApplySyncfreeIsEquivalentToRef)
{
initialize_data(50, 3);
auto lower_trs_factory =
gko::solver::LowerTrs<>::build().with_num_rhs(3u).on(ref);
auto d_lower_trs_factory =
gko::solver::LowerTrs<>::build()
.with_algorithm(gko::solver::trisolve_algorithm::syncfree)
.with_num_rhs(3u)
.on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
auto db2_strided = Mtx::create(exec, b->get_size(), 4);
d_b2->convert_to(db2_strided);
auto dx_strided = Mtx::create(exec, x->get_size(), 5);
solver->apply(b2, x);
d_solver->apply(db2_strided, dx_strided);
GKO_ASSERT_MTX_NEAR(dx_strided, x, 1e-14);
}
TEST_F(LowerTrs, CudaMultipleRhsApplyIsEquivalentToRef)
{
initialize_data(50, 3);
#if TORCH_HIP_VERSION >= 11031
#if TORCH_HIP_VERSION < 12000
// The cuSPARSE Generic SpSM implementation uses the wrong stride here
// so the input and output stride need to match
auto in_stride = 4;
auto out_stride = 4;
#else
// The cuSPARSE 12 solver is even worse: It only works if the stride is
// equal to the number of columns.
auto in_stride = 3;
auto out_stride = 3;
#endif
#else
auto in_stride = 4;
auto out_stride = 5;
#endif
auto lower_trs_factory =
gko::solver::LowerTrs<>::build().with_num_rhs(3u).on(ref);
auto d_lower_trs_factory =
gko::solver::LowerTrs<>::build().with_num_rhs(3u).on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
auto db2_strided = Mtx::create(exec, b->get_size(), in_stride);
d_b2->convert_to(db2_strided);
auto dx_strided = Mtx::create(exec, x->get_size(), out_stride);
solver->apply(b2, x);
d_solver->apply(db2_strided, dx_strided);
GKO_ASSERT_MTX_NEAR(dx_strided, x, 1e-14);
}
TEST_F(LowerTrs, CudaApplyThrowsWithWrongNumRHS)
{
initialize_data(50, 3);
auto d_lower_trs_factory = gko::solver::LowerTrs<>::build().on(exec);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
ASSERT_THROW(d_solver->apply(d_b2, d_x), gko::ValueMismatch);
}
} // namespace
| c5935350f9d717a0b6e0667bf901bb6a61e60de0.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2023, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include <memory>
#include <random>
#include <cuda.h>
#include <gtest/gtest.h>
#include <ginkgo/core/base/exception.hpp>
#include <ginkgo/core/base/executor.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/dense.hpp>
#include <ginkgo/core/solver/triangular.hpp>
#include "core/solver/lower_trs_kernels.hpp"
#include "cuda/test/utils.hpp"
namespace {
class LowerTrs : public CudaTestFixture {
protected:
using CsrMtx = gko::matrix::Csr<double, gko::int32>;
using Mtx = gko::matrix::Dense<>;
LowerTrs() : rand_engine(30) {}
std::unique_ptr<Mtx> gen_mtx(int num_rows, int num_cols)
{
return gko::test::generate_random_matrix<Mtx>(
num_rows, num_cols,
std::uniform_int_distribution<>(num_cols, num_cols),
std::normal_distribution<>(-1.0, 1.0), rand_engine, ref);
}
std::unique_ptr<Mtx> gen_l_mtx(int size)
{
return gko::test::generate_random_lower_triangular_matrix<Mtx>(
size, false, std::uniform_int_distribution<>(size),
std::normal_distribution<>(-1.0, 1.0), rand_engine, ref);
}
void initialize_data(int m, int n)
{
mtx = gen_l_mtx(m);
b = gen_mtx(m, n);
x = gen_mtx(m, n);
csr_mtx = CsrMtx::create(ref);
mtx->convert_to(csr_mtx);
d_csr_mtx = CsrMtx::create(exec);
d_x = gko::clone(exec, x);
d_csr_mtx->copy_from(csr_mtx);
b2 = Mtx::create(ref);
d_b2 = gko::clone(exec, b);
b2->copy_from(b);
}
std::shared_ptr<Mtx> b;
std::shared_ptr<Mtx> b2;
std::shared_ptr<Mtx> x;
std::shared_ptr<Mtx> mtx;
std::shared_ptr<CsrMtx> csr_mtx;
std::shared_ptr<Mtx> d_b;
std::shared_ptr<Mtx> d_b2;
std::shared_ptr<Mtx> d_x;
std::shared_ptr<CsrMtx> d_csr_mtx;
std::default_random_engine rand_engine;
};
TEST_F(LowerTrs, CudaLowerTrsFlagCheckIsCorrect)
{
bool trans_flag = true;
bool expected_flag = false;
gko::kernels::cuda::lower_trs::should_perform_transpose(exec, trans_flag);
ASSERT_EQ(expected_flag, trans_flag);
}
TEST_F(LowerTrs, CudaSingleRhsApplySyncfreeIsEquivalentToRef)
{
initialize_data(50, 1);
auto lower_trs_factory = gko::solver::LowerTrs<>::build().on(ref);
auto d_lower_trs_factory =
gko::solver::LowerTrs<>::build()
.with_algorithm(gko::solver::trisolve_algorithm::syncfree)
.on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
solver->apply(b2, x);
d_solver->apply(d_b2, d_x);
GKO_ASSERT_MTX_NEAR(d_x, x, 1e-14);
}
TEST_F(LowerTrs, CudaSingleRhsApplyIsEquivalentToRef)
{
initialize_data(50, 1);
auto lower_trs_factory = gko::solver::LowerTrs<>::build().on(ref);
auto d_lower_trs_factory = gko::solver::LowerTrs<>::build().on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
solver->apply(b2, x);
d_solver->apply(d_b2, d_x);
GKO_ASSERT_MTX_NEAR(d_x, x, 1e-14);
}
TEST_F(LowerTrs, CudaMultipleRhsApplySyncfreeIsEquivalentToRef)
{
initialize_data(50, 3);
auto lower_trs_factory =
gko::solver::LowerTrs<>::build().with_num_rhs(3u).on(ref);
auto d_lower_trs_factory =
gko::solver::LowerTrs<>::build()
.with_algorithm(gko::solver::trisolve_algorithm::syncfree)
.with_num_rhs(3u)
.on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
auto db2_strided = Mtx::create(exec, b->get_size(), 4);
d_b2->convert_to(db2_strided);
auto dx_strided = Mtx::create(exec, x->get_size(), 5);
solver->apply(b2, x);
d_solver->apply(db2_strided, dx_strided);
GKO_ASSERT_MTX_NEAR(dx_strided, x, 1e-14);
}
TEST_F(LowerTrs, CudaMultipleRhsApplyIsEquivalentToRef)
{
initialize_data(50, 3);
#if CUDA_VERSION >= 11031
#if CUDA_VERSION < 12000
// The cuSPARSE Generic SpSM implementation uses the wrong stride here
// so the input and output stride need to match
auto in_stride = 4;
auto out_stride = 4;
#else
// The cuSPARSE 12 solver is even worse: It only works if the stride is
// equal to the number of columns.
auto in_stride = 3;
auto out_stride = 3;
#endif
#else
auto in_stride = 4;
auto out_stride = 5;
#endif
auto lower_trs_factory =
gko::solver::LowerTrs<>::build().with_num_rhs(3u).on(ref);
auto d_lower_trs_factory =
gko::solver::LowerTrs<>::build().with_num_rhs(3u).on(exec);
auto solver = lower_trs_factory->generate(csr_mtx);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
auto db2_strided = Mtx::create(exec, b->get_size(), in_stride);
d_b2->convert_to(db2_strided);
auto dx_strided = Mtx::create(exec, x->get_size(), out_stride);
solver->apply(b2, x);
d_solver->apply(db2_strided, dx_strided);
GKO_ASSERT_MTX_NEAR(dx_strided, x, 1e-14);
}
TEST_F(LowerTrs, CudaApplyThrowsWithWrongNumRHS)
{
initialize_data(50, 3);
auto d_lower_trs_factory = gko::solver::LowerTrs<>::build().on(exec);
auto d_solver = d_lower_trs_factory->generate(d_csr_mtx);
ASSERT_THROW(d_solver->apply(d_b2, d_x), gko::ValueMismatch);
}
} // namespace
|
b54debcdfdaf451e87f95cb26d6590b6d596d1a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaUpdateFiringRate_kernel(unsigned int * firingRate, unsigned int * totalFiringRatePartial, unsigned int inputsDimX, unsigned int inputsDimY, unsigned int inputsDimZ)
{
const unsigned int inputSize = inputsDimZ * inputsDimX * inputsDimY;
const unsigned int batchInputOffset = blockIdx.z * inputSize;
const unsigned int blockOffset = blockIdx.x * blockDim.x;
const unsigned int partialIdx = threadIdx.x + blockOffset;
extern __shared__ unsigned int partialSum[];
// Perform first level of reduction during initialization
// This is more efficient since we need all threads to load data
// but the partial sum will see only half of the threads active
//partialSum[threadIdx.x] = firingRate[partialIdx + batchInputOffset] +
// firingRate[partialIdx + blockDim.x + batchInputOffset];
partialSum[threadIdx.x] = 0;
if (partialIdx < inputSize){
partialSum[threadIdx.x] = firingRate[partialIdx + batchInputOffset];
}
__syncthreads();
// Reduction over neurons
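// Shared-memory tree reduction: each pass halves the number of active
// threads, so after the loop partialSum[0] holds this block's total.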
for (int offset = blockDim.x/2; offset > 0; offset >>= 1) {
if (threadIdx.x < offset){
partialSum[threadIdx.x] += partialSum[threadIdx.x + offset];
}
__syncthreads();
}
if (threadIdx.x == 0) {
totalFiringRatePartial[blockIdx.x+gridDim.x*blockIdx.z] = partialSum[0];
}
} | b54debcdfdaf451e87f95cb26d6590b6d596d1a3.cu | #include "includes.h"
__global__ void cudaUpdateFiringRate_kernel(unsigned int * firingRate, unsigned int * totalFiringRatePartial, unsigned int inputsDimX, unsigned int inputsDimY, unsigned int inputsDimZ)
{
const unsigned int inputSize = inputsDimZ * inputsDimX * inputsDimY;
const unsigned int batchInputOffset = blockIdx.z * inputSize;
const unsigned int blockOffset = blockIdx.x * blockDim.x;
const unsigned int partialIdx = threadIdx.x + blockOffset;
extern __shared__ unsigned int partialSum[];
// Perform first level of reduction during initialization
// This is more efficient since we need all threads to load data
// but the partial sum will see only half of the threads active
//partialSum[threadIdx.x] = firingRate[partialIdx + batchInputOffset] +
// firingRate[partialIdx + blockDim.x + batchInputOffset];
partialSum[threadIdx.x] = 0;
if (partialIdx < inputSize){
partialSum[threadIdx.x] = firingRate[partialIdx + batchInputOffset];
}
__syncthreads();
// Reduction over neurons
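// Shared-memory tree reduction: each pass halves the number of active
// threads, so after the loop partialSum[0] holds this block's total.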
for (int offset = blockDim.x/2; offset > 0; offset >>= 1) {
if (threadIdx.x < offset){
partialSum[threadIdx.x] += partialSum[threadIdx.x + offset];
}
__syncthreads();
}
if (threadIdx.x == 0) {
totalFiringRatePartial[blockIdx.x+gridDim.x*blockIdx.z] = partialSum[0];
}
} |
65e9f0414332f6359842277cc061b2ffad3c5209.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/ThrustAllocator.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/SparseTensorUtils.h>
#include <c10/macros/Macros.h>
#include <c10/util/accumulate.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_mask_helper_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/binary_search.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
namespace {
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void _sparse_mask_copy_kernel(
int64_t total_threads,
int64_t t_nnz,
const TensorInfo<int64_t, int64_t> t_indices_ti,
const TensorInfo<int64_t, int64_t> mask_indices_ti,
const TensorInfo<int64_t, int64_t> t_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> t_values_ti,
TensorInfo<scalar_t, int64_t> r_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = t_indices_pos_ti.data[i];
bool has_match = false;
if (j >= 0 && j < t_nnz && t_indices_ti.data[j] == mask_indices_ti.data[i]) {
has_match = true;
}
int64_t values_stride0 = r_values_ti.strides[0];
int64_t out_start = i * values_stride0;
int64_t out_end = (i + 1) * values_stride0;
int64_t in_start = j * t_values_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
r_values_ti.data[out_i] = t_values_ti.data[in_i];
}
}
}
} // end namespace
SparseTensor _coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
TORCH_INTERNAL_ASSERT(!self.is_coalesced());
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::hip::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
Tensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
Tensor origIndices = at::empty({nnz}, self._indices().options());
Tensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data_ptr<int64_t>());
thrust_ptr origIndicesIter(origIndices.data_ptr<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data_ptr<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, LTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
// If there is no values to copy, save running the kernel.
if (newValues.numel() > 0) {
const int SZ = 4;
values = values.contiguous();
int64_t stride = c10::multiply_integers(values.sizes().slice(1));
dim3 grid(ceil_div(newNnz, (int64_t) SZ), ceil_div(stride, (int64_t) C10_WARP_SIZE*SZ));
dim3 block(C10_WARP_SIZE, SZ);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, values.scalar_type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
hipLaunchKernelGGL(( apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t>), dim3(grid), dim3(block), 0, stream,
uniqueOffsets.data_ptr<int64_t>(),
origIndices.data_ptr<int64_t>(),
values.data_ptr<scalar_t>(),
newValues.data_ptr<scalar_t>(),
nnz,
newNnz,
stride
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, ceil_div((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream> >>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
// C10_HIP_KERNEL_LAUNCH_CHECK();
////////////////////////////////////////////////////////////
// unflatten indices if necessary
Tensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
Tensor indicesSlice = newIndices.narrow(0, d, 1);
// Note for the porting guide: THCTensor_(copy) does NOT do normal
// broadcasting logic; instead, it will blast the elements from one
// to the other so long as the numel is the same
indicesSlice.copy_(indices1D);
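// Peel off coordinate d: the slice holds the flat index, indices1D becomes
// flat / size(d), and the add below leaves flat - size(d) * (flat / size(d)),
// i.e. flat % size(d) -- the index along dimension d.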
indices1D.divide_(self.size(d), "trunc");
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
// We can use unsafe sparse tensor constructor because the indices do not
// need to be revalidated as we do not add or change indices, just remove
// duplicates.
SparseTensor dst = ::at::native::_sparse_coo_tensor_unsafe(newIndices, newValues, self.sizes())._coalesced_(true);
AT_CUDA_CHECK(hipGetLastError());
return dst;
}
Tensor sparse_mask_helper_cuda(
const SparseTensor& t,
const Tensor& mask_indices) {
/*
This is a helper function which filter values from `t._values()` using the
`mask_indices`. This CUDA implementation uses `thrust::lower_bound`
operation to find the intersection of the `mask_indices` and the
`t._indices()` to then filter the values.
Inputs:
`t` - coalesced sparse tensor input
`mask_indices` - mask indices tensor
Note: The nnz in the output tensor will be the same as in `mask_indices`, so it
works independently of whether the mask is coalesced or not.
*/
TORCH_CHECK(t.is_sparse(), "t: input is not a sparse tensor");
TORCH_CHECK(t.is_coalesced(), "t: input is uncoalesced");
TORCH_CHECK(mask_indices.dim() == t._indices().dim(), "mask_indices: operands have incompatible indices dim; self has dim ",
t._indices().dim(), " but mask has dim ", mask_indices.dim());
TORCH_CHECK(mask_indices.is_contiguous(), "mask_indices: mask is not contiguous");
int64_t r_nnz = mask_indices.size(1);
auto t_values = t._values().contiguous();
auto full_size = t.sizes();
auto vsize = t_values.sizes().vec();
vsize[0] = r_nnz;
if (t.sparse_dim() == 0) {
Tensor t_values_expand = t_values;
t_values_expand = t_values_expand.expand(vsize).contiguous();
return t_values_expand;
}
Tensor r_values = at::zeros({vsize}, t_values.options());
auto t_indices = t._indices().contiguous();
auto t_nnz = t._nnz();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::hip::par(allocator).on(stream);
// Step 1: flatten the sparse indices `t._indices()` tensor into a 1D indices
// tensor `t_flatten_indices`.
auto t_flatten_indices = at::sparse::flatten_indices(t_indices, full_size).contiguous();
// Step 2: flatten the sparse indices `mask_indices` tensor into a 1D indices
// tensor `mask_flatten_indices`. Note: This could be not sorted if the input
// indices in the constructor are not in a coalesced form
auto flattened_mask_indices =
at::sparse::flatten_indices(mask_indices, full_size);
Tensor t_indices_pos = at::empty({r_nnz}, mask_indices.options());
// Step 3: Match the flattened `mask_indices` with the flattened
// `t._indices()` using the `thrust::lower_bound`
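// lower_bound returns, for each mask index, the first position in the sorted
// t_flatten_indices that is >= it (hypothetical example: with sorted indices
// {0, 3, 7} and mask value 3 the position is 1). The copy kernel re-checks
// equality at that position, so mask entries absent from t keep their
// zero-initialized values.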
thrust::lower_bound(
policy,
t_flatten_indices.data_ptr<int64_t>(),
t_flatten_indices.data_ptr<int64_t>() + t_nnz,
flattened_mask_indices.data_ptr<int64_t>(),
flattened_mask_indices.data_ptr<int64_t>() + r_nnz,
t_indices_pos.data_ptr<int64_t>());
// Step 4: Copy the Filtered `t._values()` using the matches at `t_indices_pos`
if (r_nnz > 0 && t_values.numel() > 0) {
int64_t block_size = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
auto grid_size = ceil_div(r_nnz, block_size);
auto t_indices_ti = getTensorInfo<int64_t, int64_t>(t_flatten_indices);
auto mask_indices_ti =
getTensorInfo<int64_t, int64_t>(flattened_mask_indices);
auto t_indices_pos_ti =
getTensorInfo<int64_t, int64_t>(t_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
r_values.scalar_type(), "sparse_mask_helper_cuda", [&] {
auto t_values_ti = getTensorInfo<scalar_t, int64_t>(t_values);
auto r_values_ti =
getTensorInfo<scalar_t, int64_t>(r_values);
hipLaunchKernelGGL(( _sparse_mask_copy_kernel<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
r_nnz,
t_nnz,
t_indices_ti,
mask_indices_ti,
t_indices_pos_ti,
t_values_ti,
r_values_ti);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return r_values;
}
}} // namespace at::native
| 65e9f0414332f6359842277cc061b2ffad3c5209.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/ThrustAllocator.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/SparseTensorUtils.h>
#include <c10/macros/Macros.h>
#include <c10/util/accumulate.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/_sparse_mask_helper_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/binary_search.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::sparse;
using at::cuda::detail::TensorInfo;
using at::cuda::detail::getTensorInfo;
namespace {
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void _sparse_mask_copy_kernel(
int64_t total_threads,
int64_t t_nnz,
const TensorInfo<int64_t, int64_t> t_indices_ti,
const TensorInfo<int64_t, int64_t> mask_indices_ti,
const TensorInfo<int64_t, int64_t> t_indices_pos_ti,
const TensorInfo<scalar_t, int64_t> t_values_ti,
TensorInfo<scalar_t, int64_t> r_values_ti) {
const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= total_threads) return;
const int64_t j = t_indices_pos_ti.data[i];
bool has_match = false;
if (j >= 0 && j < t_nnz && t_indices_ti.data[j] == mask_indices_ti.data[i]) {
has_match = true;
}
int64_t values_stride0 = r_values_ti.strides[0];
int64_t out_start = i * values_stride0;
int64_t out_end = (i + 1) * values_stride0;
int64_t in_start = j * t_values_ti.strides[0];
if (has_match) {
for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
r_values_ti.data[out_i] = t_values_ti.data[in_i];
}
}
}
} // end namespace
SparseTensor _coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
TORCH_INTERNAL_ASSERT(!self.is_coalesced());
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::cuda::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
Tensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
Tensor origIndices = at::empty({nnz}, self._indices().options());
Tensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data_ptr<int64_t>());
thrust_ptr origIndicesIter(origIndices.data_ptr<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data_ptr<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, LTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
// If there is no values to copy, save running the kernel.
if (newValues.numel() > 0) {
const int SZ = 4;
values = values.contiguous();
int64_t stride = c10::multiply_integers(values.sizes().slice(1));
dim3 grid(ceil_div(newNnz, (int64_t) SZ), ceil_div(stride, (int64_t) C10_WARP_SIZE*SZ));
dim3 block(C10_WARP_SIZE, SZ);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16, values.scalar_type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t><<<grid, block, 0, stream>>>(
uniqueOffsets.data_ptr<int64_t>(),
origIndices.data_ptr<int64_t>(),
values.data_ptr<scalar_t>(),
newValues.data_ptr<scalar_t>(),
nnz,
newNnz,
stride
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, ceil_div((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream> >>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
// C10_CUDA_KERNEL_LAUNCH_CHECK();
////////////////////////////////////////////////////////////
// unflatten indices if necessary
Tensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
Tensor indicesSlice = newIndices.narrow(0, d, 1);
// Note for the porting guide: THCTensor_(copy) does NOT do normal
// broadcasting logic; instead, it will blast the elements from one
// to the other so long as the numel is the same
indicesSlice.copy_(indices1D);
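// Peel off coordinate d: the slice holds the flat index, indices1D becomes
// flat / size(d), and the add below leaves flat - size(d) * (flat / size(d)),
// i.e. flat % size(d) -- the index along dimension d.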
indices1D.divide_(self.size(d), "trunc");
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
// We can use unsafe sparse tensor constructor because the indices do not
// need to be revalidated as we do not add or change indices, just remove
// duplicates.
SparseTensor dst = ::at::native::_sparse_coo_tensor_unsafe(newIndices, newValues, self.sizes())._coalesced_(true);
AT_CUDA_CHECK(cudaGetLastError());
return dst;
}
Tensor sparse_mask_helper_cuda(
const SparseTensor& t,
const Tensor& mask_indices) {
/*
This is a helper function which filter values from `t._values()` using the
`mask_indices`. This CUDA implementation uses `thrust::lower_bound`
operation to find the intersection of the `mask_indices` and the
`t._indices()` to then filter the values.
Inputs:
`t` - coalesced sparse tensor input
`mask_indices` - mask indices tensor
Note: The nnz in the output tensor will be the same as in `mask_indices`, so it
works independently of whether the mask is coalesced or not.
*/
TORCH_CHECK(t.is_sparse(), "t: input is not a sparse tensor");
TORCH_CHECK(t.is_coalesced(), "t: input is uncoalesced");
TORCH_CHECK(mask_indices.dim() == t._indices().dim(), "mask_indices: operands have incompatible indices dim; self has dim ",
t._indices().dim(), " but mask has dim ", mask_indices.dim());
TORCH_CHECK(mask_indices.is_contiguous(), "mask_indices: mask is not contiguous");
int64_t r_nnz = mask_indices.size(1);
auto t_values = t._values().contiguous();
auto full_size = t.sizes();
auto vsize = t_values.sizes().vec();
vsize[0] = r_nnz;
if (t.sparse_dim() == 0) {
Tensor t_values_expand = t_values;
t_values_expand = t_values_expand.expand(vsize).contiguous();
return t_values_expand;
}
Tensor r_values = at::zeros({vsize}, t_values.options());
auto t_indices = t._indices().contiguous();
auto t_nnz = t._nnz();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::cuda::par(allocator).on(stream);
// Step 1: flatten the sparse indices `t._indices()` tensor into a 1D indices
// tensor `t_flatten_indices`.
auto t_flatten_indices = at::sparse::flatten_indices(t_indices, full_size).contiguous();
// Step 2: flatten the sparse indices `mask_indices` tensor into a 1D indices
// tensor `mask_flatten_indices`. Note: This could be not sorted if the input
// indices in the constructor are not in a coalesced form
auto flattened_mask_indices =
at::sparse::flatten_indices(mask_indices, full_size);
Tensor t_indices_pos = at::empty({r_nnz}, mask_indices.options());
// Step 3: Match the flattened `mask_indices` with the flattened
// `t._indices()` using the `thrust::lower_bound`
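// lower_bound returns, for each mask index, the first position in the sorted
// t_flatten_indices that is >= it (hypothetical example: with sorted indices
// {0, 3, 7} and mask value 3 the position is 1). The copy kernel re-checks
// equality at that position, so mask entries absent from t keep their
// zero-initialized values.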
thrust::lower_bound(
policy,
t_flatten_indices.data_ptr<int64_t>(),
t_flatten_indices.data_ptr<int64_t>() + t_nnz,
flattened_mask_indices.data_ptr<int64_t>(),
flattened_mask_indices.data_ptr<int64_t>() + r_nnz,
t_indices_pos.data_ptr<int64_t>());
// Step 4: Copy the Filtered `t._values()` using the matches at `t_indices_pos`
if (r_nnz > 0 && t_values.numel() > 0) {
int64_t block_size = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
auto grid_size = ceil_div(r_nnz, block_size);
auto t_indices_ti = getTensorInfo<int64_t, int64_t>(t_flatten_indices);
auto mask_indices_ti =
getTensorInfo<int64_t, int64_t>(flattened_mask_indices);
auto t_indices_pos_ti =
getTensorInfo<int64_t, int64_t>(t_indices_pos);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf,
r_values.scalar_type(), "sparse_mask_helper_cuda", [&] {
auto t_values_ti = getTensorInfo<scalar_t, int64_t>(t_values);
auto r_values_ti =
getTensorInfo<scalar_t, int64_t>(r_values);
_sparse_mask_copy_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
r_nnz,
t_nnz,
t_indices_ti,
mask_indices_ti,
t_indices_pos_ti,
t_values_ti,
r_values_ti);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return r_values;
}
}} // namespace at::native
|
9d8ad80f441beb0ab943a36dfc54584449af9eaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip"
#else
void THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>)
, dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, true},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, false},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(hipGetLastError());
return val;
}
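// Dimension-wise p-norm dispatch: p == 0 counts non-zeros, p == 1, 2, +inf and
// -inf get specialized reductions, and any other p takes the generic
// sum(|x|^p)^(1/p) path.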
void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(INFINITY),
dimension, keepdim);
} else {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<scalar_t>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
scalar_cast<accreal>(INFINITY),
&result, 0);
} else {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
ReduceMax<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY),
ReduceMin<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0)));
} else {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(value));
result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
}
#endif
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
accreal THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
scalar_t val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THCTensor *view = THCTensor_(newView)(state, self, {nelem});
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
void THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(hipGetLastError());
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
#endif
| 9d8ad80f441beb0ab943a36dfc54584449af9eaa.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
void THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
THCTensor_kernel_renorm<scalar_t, accreal>
<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError_t errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, true},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, false},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(cudaGetLastError());
return val;
}
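// Dimension-wise p-norm dispatch: p == 0 counts non-zeros, p == 1, 2, +inf and
// -inf get specialized reductions, and any other p takes the generic
// sum(|x|^p)^(1/p) path.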
void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(INFINITY),
dimension, keepdim);
} else {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<scalar_t>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
scalar_cast<accreal>(INFINITY),
&result, 0);
} else {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result;
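// ||self - src||_p via thrust::inner_product: an element-wise distance functor combined with plus (finite p), max (p = inf),
// or min (p = -inf); for general p the accumulated sum is finally raised to the power 1/p.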
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
ReduceMax<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY),
ReduceMin<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0)));
} else {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(value));
result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
}
#endif
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
accreal THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
scalar_t val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
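// Lower median of the flattened tensor: sort ascending and take element (nelem - 1) / 2 (no averaging for even-length inputs).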
THCTensor *view = THCTensor_(newView)(state, self, {nelem});
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
void THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
#endif
|
434a1526f68a7d752633a728c93494cc0bc577f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* mct_transform.c
*
* Created on: Nov 30, 2011
* Author: miloszc
*/
extern "C" {
#include "../misc/memory_management.cuh"
#include "../print_info/print_info.h"
}
#include "mct_transform.h"
#include "adjust.h"
#include "../misc/cuda_errors.h"
void mct_transform(type_image *img, type_data* transform_d, type_data **data_pd, int odciecie) {
int i = 0;
type_data* data_d;
type_data* inter_d;
cuda_d_allocate_mem((void **) &data_d, img->num_components * sizeof(type_data));
cuda_d_allocate_mem((void **) &inter_d, (img->num_components - odciecie) * sizeof(type_data));
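// Per-pixel loop: gather pixel i's component vector, apply the forward transform, and write back the first (num_components - odciecie) transformed components.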
for(i=0; i<img->width*img->height; ++i) {
hipDeviceSynchronize();
hipLaunchKernelGGL(( readSampleSimple), dim3(1),dim3(img->num_components), 0, 0, data_pd, i, data_d);
checkCUDAError("\tafter data read");
// println_var(INFO, "\tsample %d read",i);
hipDeviceSynchronize();
adjust_pca_data_mm(transform_d, FORWARD, data_d, inter_d, img->num_components, img->num_components);
checkCUDAError("\tafter data transform");
// println_var(INFO, "\tsample %d transform",i);
hipDeviceSynchronize();
hipLaunchKernelGGL(( writeSampleSimple), dim3(1),dim3(img->num_components-odciecie), 0, 0, data_pd, i, inter_d);
checkCUDAError("\tafter data write");
// println_var(INFO, "\tsample %d write",i);
}
}
void mct_transform_new(type_image *img, type_data* transform_d, type_data **data_pd, int odciecie) {
int i = 0;
int num_vecs = img->width * img->height;
int len_vec = img->num_components;
type_data* data_d;
type_data* inter_d;
cuda_d_allocate_mem((void **) &data_d, num_vecs * len_vec * sizeof(type_data));
cuda_d_allocate_mem((void **) &inter_d, num_vecs * len_vec * sizeof(type_data));
int blocks = (num_vecs + (THREADS - 1))/THREADS;
hipLaunchKernelGGL(( readSamples), dim3(blocks), dim3(THREADS), 0, 0, data_pd, num_vecs, len_vec, data_d);
for(i = 0; i < num_vecs; ++i) {
type_data *i_vec = data_d + i * len_vec;
type_data *o_vec = inter_d + i * len_vec;
adjust_pca_data_mv(transform_d, FORWARD, i_vec, o_vec, len_vec, len_vec);
}
hipLaunchKernelGGL(( writeSamples), dim3(blocks), dim3(THREADS), 0, 0, data_pd, num_vecs, len_vec, inter_d);
cuda_d_free(data_d);
cuda_d_free(inter_d);
}
| 434a1526f68a7d752633a728c93494cc0bc577f7.cu | /*
* mct_transform.c
*
* Created on: Nov 30, 2011
* Author: miloszc
*/
extern "C" {
#include "../misc/memory_management.cuh"
#include "../print_info/print_info.h"
}
#include "mct_transform.h"
#include "adjust.h"
#include "../misc/cuda_errors.h"
void mct_transform(type_image *img, type_data* transform_d, type_data **data_pd, int odciecie) {
int i = 0;
type_data* data_d;
type_data* inter_d;
cuda_d_allocate_mem((void **) &data_d, img->num_components * sizeof(type_data));
cuda_d_allocate_mem((void **) &inter_d, (img->num_components - odciecie) * sizeof(type_data));
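// Per-pixel loop: gather pixel i's component vector, apply the forward transform, and write back the first (num_components - odciecie) transformed components.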
for(i=0; i<img->width*img->height; ++i) {
cudaThreadSynchronize();
readSampleSimple<<<1,img->num_components>>>(data_pd, i, data_d);
checkCUDAError("\tafter data read");
// println_var(INFO, "\tsample %d read",i);
cudaThreadSynchronize();
adjust_pca_data_mm(transform_d, FORWARD, data_d, inter_d, img->num_components, img->num_components);
checkCUDAError("\tafter data transform");
// println_var(INFO, "\tsample %d transform",i);
cudaThreadSynchronize();
writeSampleSimple<<<1,img->num_components-odciecie>>>(data_pd, i, inter_d);
checkCUDAError("\tafter data write");
// println_var(INFO, "\tsample %d write",i);
}
}
void mct_transform_new(type_image *img, type_data* transform_d, type_data **data_pd, int odciecie) {
int i = 0;
int num_vecs = img->width * img->height;
int len_vec = img->num_components;
type_data* data_d;
type_data* inter_d;
cuda_d_allocate_mem((void **) &data_d, num_vecs * len_vec * sizeof(type_data));
cuda_d_allocate_mem((void **) &inter_d, num_vecs * len_vec * sizeof(type_data));
int blocks = (num_vecs + (THREADS - 1))/THREADS;
readSamples<<<blocks, THREADS>>>(data_pd, num_vecs, len_vec, data_d);
for(i = 0; i < num_vecs; ++i) {
type_data *i_vec = data_d + i * len_vec;
type_data *o_vec = inter_d + i * len_vec;
adjust_pca_data_mv(transform_d, FORWARD, i_vec, o_vec, len_vec, len_vec);
}
writeSamples<<<blocks, THREADS>>>(data_pd, num_vecs, len_vec, inter_d);
cuda_d_free(data_d);
cuda_d_free(inter_d);
}
|
1fa481f6ced4803619e5cfb6fa8a4acd5c741137.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include "utils.hpp"
#include "gpuHeaders.cuh"
#include "extra.cu"
#include <iostream>
using namespace std;
#define threadsPerBlock 256
int one_way_handshake(GraphData graph, int *& matches, int numthreads, bool extra_credit)
{
int num_thread_blocks = (numthreads + threadsPerBlock - 1) / threadsPerBlock;
int numVertices = graph.numNodes;
int numEdges = graph.numEdges;
//Prepare various GPU arrays that we're going to need:
int * strongNeighbor_gpu;//will hold strongest neighbor for each vertex
int * matches_gpu;//will hold the output
int * src_gpu;//holds the src nodes in edge list
int * dst_gpu;//holds the dst nodes in edge list
int * weight_gpu;//holds the edge weights in edge list
int * temp1_gpu;//a temporary array for data we don't need to keep for long
int * temp2_gpu;//a temporary array for data we don't need to keep for long
int * temp3_gpu;//a temporary array for data we don't need to keep for long
int * temp4_gpu;//a temporary array for data we don't need to keep for long
/** YOUR CODE GOES BELOW (allocate GPU memory, and copy from CPU to GPU as appropriate **/
hipMalloc((void**)&strongNeighbor_gpu, numVertices*sizeof(int));
hipMalloc((void**)&matches_gpu, numVertices*sizeof(int));
hipMalloc((void**)&src_gpu, (numEdges)*sizeof(int));
hipMalloc((void**)&dst_gpu, numEdges*sizeof(int));
hipMalloc((void**)&weight_gpu, numEdges*sizeof(int));
hipMalloc((void**)&temp1_gpu, (numEdges+1) * sizeof(int));
hipMalloc((void**)&temp2_gpu, (numEdges+1) * sizeof(int));
hipMalloc((void**)&temp3_gpu, (numEdges+1) * sizeof(int));
hipMalloc((void**)&temp4_gpu, (numEdges+1) * sizeof(int));
hipMemcpy(matches_gpu, matches, sizeof(int) * numVertices, hipMemcpyHostToDevice);
hipMemcpy(src_gpu, graph.src, sizeof(int) * (numEdges), hipMemcpyHostToDevice);
hipMemcpy(dst_gpu, graph.dst, sizeof(int) * numEdges, hipMemcpyHostToDevice);
hipMemcpy(weight_gpu, graph.weight, sizeof(int) * numEdges, hipMemcpyHostToDevice);
/** YOUR CODE GOES ABOVE **/
//matching loop
int iter;
for (iter = 0; ; iter++) {
if(extra_credit) {
/** YOUR CODE GOES BELOW (extra credit) **/
/** YOUR CODE GOES ABOVE (extra credit) **/
} else {
//Step 1: Get strongest neighbor for each vertex/node
int * strongNeighbor_cpu = (int *) malloc(sizeof(int) * numVertices);
int * strongNeighborWeight_cpu = (int *) malloc(sizeof(int) * numVertices);
for(int x = 0; x < numVertices; x++) {
strongNeighbor_cpu[x] = -1;
}
for(int x = 0; x < numEdges; x++) {
int src = graph.src[x];
int dst = graph.dst[x];
int wgt = graph.weight[x];
//std::cerr << src << "->" << dst << ": " << wgt << "\n";
if(strongNeighbor_cpu[src] == -1 || strongNeighborWeight_cpu[src] < wgt) {
strongNeighbor_cpu[src] = dst;
strongNeighborWeight_cpu[src] = wgt;
}
}
//move data from CPU to GPU, and free the CPU arrays
hipMemcpy(strongNeighbor_gpu, strongNeighbor_cpu, numVertices * sizeof(int), hipMemcpyHostToDevice);
free(strongNeighbor_cpu);
free(strongNeighborWeight_cpu);
}
//Step 2: check for each vertex whether there's a handshake
hipLaunchKernelGGL(( check_handshaking_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, strongNeighbor_gpu, matches_gpu, numVertices);
//Step 3: filter
//Step 3a: decide which edges to keep (marked with a 1) versus filter (marked with a 0)
int * keepEdges_gpu = temp1_gpu;
temp1_gpu = NULL;
hipLaunchKernelGGL(( markFilterEdges_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, src_gpu, dst_gpu, matches_gpu, keepEdges_gpu, numEdges);
//Step 3b: get new indices (in edge list for next iteration) of the edges we're going to keep
int * newEdgeLocs_gpu = keepEdges_gpu;
keepEdges_gpu = NULL;
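// Multi-pass exclusive prefix sum over the keep/filter flags: each launch handles one stride ("distance"), doubling it every pass and ping-ponging between the two buffers.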
for(int distance = 0; distance <= numEdges; distance = max(1, distance * 2)) {
hipLaunchKernelGGL(( exclusive_prefix_sum_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, newEdgeLocs_gpu, temp2_gpu, distance, numEdges+1);
swapArray((void**) &newEdgeLocs_gpu, (void**) &temp2_gpu);
}
//note: temp1 is still in use, until we're done with newEdgeLocs_gpu
//Step 3c: check if we're done matching
int lastLoc = 0;
hipMemcpy(&lastLoc, &(newEdgeLocs_gpu[numEdges]), sizeof(int), hipMemcpyDeviceToHost);
if(lastLoc < 2) {
//termination: fewer than two nodes remain unmatched
break;
} else if(lastLoc == numEdges) {
//termination: no additional matches are possible
break;
}
//Step 3d: pack the src, dst, and weight arrays in accordance with new edge locations
hipLaunchKernelGGL(( packGraph_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, temp2_gpu, src_gpu, temp3_gpu, dst_gpu, temp4_gpu, weight_gpu, newEdgeLocs_gpu, numEdges);
swapArray((void**) &temp2_gpu, (void**) &src_gpu);
swapArray((void**) &temp3_gpu, (void**) &dst_gpu);
swapArray((void**) &temp4_gpu, (void**) &weight_gpu);
temp1_gpu = newEdgeLocs_gpu;
newEdgeLocs_gpu = NULL;
//note: now we're done with the current contents of all the temporary arrays
//Set new number of edges:
numEdges = lastLoc;
if(iter > numVertices) {
cerr << "Error: matching has been running too long; breaking loop now\n";
break;
}
if(!extra_credit) {
//Step 4: Copy new graph arrays to CPU
hipMemcpy(graph.src, src_gpu, numEdges * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(graph.dst, dst_gpu, numEdges * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(graph.weight, weight_gpu, numEdges * sizeof(int), hipMemcpyDeviceToHost);
}
}
hipMemcpy(matches, matches_gpu, numVertices * sizeof(int), hipMemcpyDeviceToHost);
//Wait until pending GPU operations are complete:
hipDeviceSynchronize();
//free GPU arrays
/** YOUR CODE GOES BELOW **/
hipFree(strongNeighbor_gpu);
hipFree(matches_gpu);
hipFree(src_gpu);
hipFree(dst_gpu);
hipFree(weight_gpu);
hipFree(temp1_gpu);
hipFree(temp2_gpu);
hipFree(temp3_gpu);
hipFree(temp4_gpu);
/** YOUR CODE GOES ABOVE **/
hipError_t hipError_t;
hipError_t = hipGetLastError();
if(hipError_t != hipSuccess) {
cerr << "Warning: one or more CUDA errors occurred. Try using cuda-gdb to debug. Error message: \n\t" <<hipGetErrorString(hipError_t) << "\n";
}
return iter + 1;
}
void one_way_handshake_wrapper(GraphData graph, int *& matches, int numthreads, bool extra_credit)
{
fprintf(stderr, "Start One Way Matching ... \n");
struct timeval beginTime, endTime;
setTime(&beginTime);
int iter = one_way_handshake(graph, matches, numthreads, extra_credit);
setTime(&endTime);
fprintf(stderr, "Done matching.\n");
fprintf(stderr, "Performed matching for %d iterations\n", iter);
fprintf(stderr, "One Way Handshaking Matching Time: %.2f ms\n",
getTime(&beginTime, &endTime));
}
| 1fa481f6ced4803619e5cfb6fa8a4acd5c741137.cu | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include "utils.hpp"
#include "gpuHeaders.cuh"
#include "extra.cu"
#include <iostream>
using namespace std;
#define threadsPerBlock 256
int one_way_handshake(GraphData graph, int *& matches, int numthreads, bool extra_credit)
{
int num_thread_blocks = (numthreads + threadsPerBlock - 1) / threadsPerBlock;
int numVertices = graph.numNodes;
int numEdges = graph.numEdges;
//Prepare various GPU arrays that we're going to need:
int * strongNeighbor_gpu;//will hold strongest neighbor for each vertex
int * matches_gpu;//will hold the output
int * src_gpu;//holds the src nodes in edge list
int * dst_gpu;//holds the dst nodes in edge list
int * weight_gpu;//holds the edge weights in edge list
int * temp1_gpu;//a temporary array for data we don't need to keep for long
int * temp2_gpu;//a temporary array for data we don't need to keep for long
int * temp3_gpu;//a temporary array for data we don't need to keep for long
int * temp4_gpu;//a temporary array for data we don't need to keep for long
/** YOUR CODE GOES BELOW (allocate GPU memory, and copy from CPU to GPU as appropriate **/
cudaMalloc((void**)&strongNeighbor_gpu, numVertices*sizeof(int));
cudaMalloc((void**)&matches_gpu, numVertices*sizeof(int));
cudaMalloc((void**)&src_gpu, (numEdges)*sizeof(int));
cudaMalloc((void**)&dst_gpu, numEdges*sizeof(int));
cudaMalloc((void**)&weight_gpu, numEdges*sizeof(int));
cudaMalloc((void**)&temp1_gpu, (numEdges+1) * sizeof(int));
cudaMalloc((void**)&temp2_gpu, (numEdges+1) * sizeof(int));
cudaMalloc((void**)&temp3_gpu, (numEdges+1) * sizeof(int));
cudaMalloc((void**)&temp4_gpu, (numEdges+1) * sizeof(int));
cudaMemcpy(matches_gpu, matches, sizeof(int) * numVertices, cudaMemcpyHostToDevice);
cudaMemcpy(src_gpu, graph.src, sizeof(int) * (numEdges), cudaMemcpyHostToDevice);
cudaMemcpy(dst_gpu, graph.dst, sizeof(int) * numEdges, cudaMemcpyHostToDevice);
cudaMemcpy(weight_gpu, graph.weight, sizeof(int) * numEdges, cudaMemcpyHostToDevice);
/** YOUR CODE GOES ABOVE **/
//matching loop
int iter;
for (iter = 0; ; iter++) {
if(extra_credit) {
/** YOUR CODE GOES BELOW (extra credit) **/
/** YOUR CODE GOES ABOVE (extra credit) **/
} else {
//Step 1: Get strongest neighbor for each vertex/node
int * strongNeighbor_cpu = (int *) malloc(sizeof(int) * numVertices);
int * strongNeighborWeight_cpu = (int *) malloc(sizeof(int) * numVertices);
for(int x = 0; x < numVertices; x++) {
strongNeighbor_cpu[x] = -1;
}
for(int x = 0; x < numEdges; x++) {
int src = graph.src[x];
int dst = graph.dst[x];
int wgt = graph.weight[x];
//std::cerr << src << "->" << dst << ": " << wgt << "\n";
if(strongNeighbor_cpu[src] == -1 || strongNeighborWeight_cpu[src] < wgt) {
strongNeighbor_cpu[src] = dst;
strongNeighborWeight_cpu[src] = wgt;
}
}
//move data from CPU to GPU, and free the CPU arrays
cudaMemcpy(strongNeighbor_gpu, strongNeighbor_cpu, numVertices * sizeof(int), cudaMemcpyHostToDevice);
free(strongNeighbor_cpu);
free(strongNeighborWeight_cpu);
}
//Step 2: check for each vertex whether there's a handshake
check_handshaking_gpu<<<num_thread_blocks, threadsPerBlock>>>(strongNeighbor_gpu, matches_gpu, numVertices);
//Step 3: filter
//Step 3a: decide which edges to keep (marked with a 1) versus filter (marked with a 0)
int * keepEdges_gpu = temp1_gpu;
temp1_gpu = NULL;
markFilterEdges_gpu<<<num_thread_blocks, threadsPerBlock>>>(src_gpu, dst_gpu, matches_gpu, keepEdges_gpu, numEdges);
//Step 3b: get new indices (in edge list for next iteration) of the edges we're going to keep
int * newEdgeLocs_gpu = keepEdges_gpu;
keepEdges_gpu = NULL;
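// Multi-pass exclusive prefix sum over the keep/filter flags: each launch handles one stride ("distance"), doubling it every pass and ping-ponging between the two buffers.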
for(int distance = 0; distance <= numEdges; distance = max(1, distance * 2)) {
exclusive_prefix_sum_gpu<<<num_thread_blocks, threadsPerBlock>>>(newEdgeLocs_gpu, temp2_gpu, distance, numEdges+1);
swapArray((void**) &newEdgeLocs_gpu, (void**) &temp2_gpu);
}
//note: temp1 is still in use, until we're done with newEdgeLocs_gpu
//Step 3c: check if we're done matching
int lastLoc = 0;
cudaMemcpy(&lastLoc, &(newEdgeLocs_gpu[numEdges]), sizeof(int), cudaMemcpyDeviceToHost);
if(lastLoc < 2) {
//termination: fewer than two nodes remain unmatched
break;
} else if(lastLoc == numEdges) {
//termination: no additional matches are possible
break;
}
//Step 3d: pack the src, dst, and weight arrays in accordance with new edge locations
packGraph_gpu<<<num_thread_blocks, threadsPerBlock>>>(temp2_gpu, src_gpu, temp3_gpu, dst_gpu, temp4_gpu, weight_gpu, newEdgeLocs_gpu, numEdges);
swapArray((void**) &temp2_gpu, (void**) &src_gpu);
swapArray((void**) &temp3_gpu, (void**) &dst_gpu);
swapArray((void**) &temp4_gpu, (void**) &weight_gpu);
temp1_gpu = newEdgeLocs_gpu;
newEdgeLocs_gpu = NULL;
//note: now we're done with the current contents of all the temporary arrays
//Set new number of edges:
numEdges = lastLoc;
if(iter > numVertices) {
cerr << "Error: matching has been running too long; breaking loop now\n";
break;
}
if(!extra_credit) {
//Step 4: Copy new graph arrays to CPU
cudaMemcpy(graph.src, src_gpu, numEdges * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(graph.dst, dst_gpu, numEdges * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(graph.weight, weight_gpu, numEdges * sizeof(int), cudaMemcpyDeviceToHost);
}
}
cudaMemcpy(matches, matches_gpu, numVertices * sizeof(int), cudaMemcpyDeviceToHost);
//Wait until pending GPU operations are complete:
cudaDeviceSynchronize();
//free GPU arrays
/** YOUR CODE GOES BELOW **/
cudaFree(strongNeighbor_gpu);
cudaFree(matches_gpu);
cudaFree(src_gpu);
cudaFree(dst_gpu);
cudaFree(weight_gpu);
cudaFree(temp1_gpu);
cudaFree(temp2_gpu);
cudaFree(temp3_gpu);
cudaFree(temp4_gpu);
/** YOUR CODE GOES ABOVE **/
cudaError_t cudaError;
cudaError = cudaGetLastError();
if(cudaError != cudaSuccess) {
cerr << "Warning: one or more CUDA errors occurred. Try using cuda-gdb to debug. Error message: \n\t" <<cudaGetErrorString(cudaError) << "\n";
}
return iter + 1;
}
void one_way_handshake_wrapper(GraphData graph, int *& matches, int numthreads, bool extra_credit)
{
fprintf(stderr, "Start One Way Matching ... \n");
struct timeval beginTime, endTime;
setTime(&beginTime);
int iter = one_way_handshake(graph, matches, numthreads, extra_credit);
setTime(&endTime);
fprintf(stderr, "Done matching.\n");
fprintf(stderr, "Performed matching for %d iterations\n", iter);
fprintf(stderr, "One Way Handshaking Matching Time: %.2f ms\n",
getTime(&beginTime, &endTime));
}
|
4507869792910928dcbb9bd8c1d2f76b56d25e37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ float explicitLocalStepHeat( float unjpo, float unjmo, float unj, float r)
{
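// FTCS update for the 1-D heat equation: u_j^{n+1} = (1 - 2r) u_j^n + r (u_{j-1}^n + u_{j+1}^n); stable only for r <= 1/2.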
return (1 - 2 * r)*unj + r*unjmo + r * unjpo;
}
__global__ void explicitTimestepHeat( int size, float *d_currentVal, float *d_nextVal, float r )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (i < 2)
{
d_nextVal[i] = 0;
}
else if (i > size - 2)
{
d_nextVal[i] = 0;
}
else
{
d_nextVal[i] = explicitLocalStepHeat(
d_currentVal[i + 1],
d_currentVal[i - 1],
d_currentVal[i],
r);
}
}
} | 4507869792910928dcbb9bd8c1d2f76b56d25e37.cu | #include "includes.h"
__device__ float explicitLocalStepHeat( float unjpo, float unjmo, float unj, float r)
{
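// FTCS update for the 1-D heat equation: u_j^{n+1} = (1 - 2r) u_j^n + r (u_{j-1}^n + u_{j+1}^n); stable only for r <= 1/2.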
return (1 - 2 * r)*unj + r*unjmo + r * unjpo;
}
__global__ void explicitTimestepHeat( int size, float *d_currentVal, float *d_nextVal, float r )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (i < 2)
{
d_nextVal[i] = 0;
}
else if (i > size - 2)
{
d_nextVal[i] = 0;
}
else
{
d_nextVal[i] = explicitLocalStepHeat(
d_currentVal[i + 1],
d_currentVal[i - 1],
d_currentVal[i],
r);
}
}
} |
6c778c54810fe9fdc50773b986eee360af295758.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
/*
const Dtype* ct = top[0]->cpu_data();
for (int i = 0; i < 64; i++) {
std::cout << ct[i] << std::endl;
}
exit(0);
*/
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
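// Leaky-ReLU gradient: pass the upstream gradient through where the bottom input was positive, otherwise scale it by negative_slope.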
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
| 6c778c54810fe9fdc50773b986eee360af295758.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
/*
const Dtype* ct = top[0]->cpu_data();
for (int i = 0; i < 64; i++) {
std::cout << ct[i] << std::endl;
}
exit(0);
*/
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
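// Leaky-ReLU gradient: pass the upstream gradient through where the bottom input was positive, otherwise scale it by negative_slope.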
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
00ad1ad72dbea057fe05674f2f204674bcb0946f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include "histogram1024.cuh"
#include <gloop/gloop.h>
#include <gloop/statistics.h>
///////////////////////////////////////////////////////////////////////////////
// This is NVIDIA's histogram256 SDK example, modified to compute a 1024-point
// histogram
///////////////////////////////////////////////////////////////////////////////
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
static __device__ void addData1024(volatile unsigned int* s_WarpHist, unsigned int data, unsigned int threadTag)
{
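// Software shared-memory atomic: the high bits of each bin carry the writer's thread tag, so when several threads of a
// warp hit the same bin only one tagged write survives per pass and the losers retry until their increment sticks.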
unsigned int count;
do {
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
}
static __global__ void performHistogram(unsigned int* d_Result, float* d_Data, float minimum, float maximum, int dataN, int cursor)
{
//Current global thread index
const int globalTid = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, gridDim.x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
volatile __shared__ unsigned int s_Hist[BLOCK_MEMORY];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for (int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
int pos = globalTid + cursor * numThreads;
{
int result = pos < dataN;
if (result) {
unsigned int data4 = ((d_Data[pos] - minimum) / (maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for (int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x) {
unsigned int sum = 0;
for (int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
return;
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
unsigned int* d_Result1024;
//Internal memory allocation
void initHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(hipMalloc((void**)&d_Result1024, HISTOGRAM_SIZE));
}
//Internal memory deallocation
void closeHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(hipFree(d_Result1024));
}
//histogram1024 CPU front-end
void histogram1024GPU(
Context* ctx,
unsigned int* h_Result,
float* d_Data,
float minimum,
float maximum,
int dataN)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(hipMemset(d_Result1024, 0, HISTOGRAM_SIZE));
}
int times = dataN / (BLOCK_N * THREAD_N);
if ((times * (BLOCK_N * THREAD_N)) < dataN) {
times += 1;
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
for (int i = 0; i < times; ++i) {
hipLaunchKernelGGL(( performHistogram), dim3(dim3(BLOCK_N)), dim3(dim3(THREAD_N)), 0, 0, d_Result1024, d_Data, minimum, maximum, dataN, i);
}
hipDeviceSynchronize();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(hipMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, hipMemcpyDeviceToHost));
}
}
| 00ad1ad72dbea057fe05674f2f204674bcb0946f.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
#include "histogram1024.cuh"
#include <gloop/gloop.h>
#include <gloop/statistics.h>
///////////////////////////////////////////////////////////////////////////////
// This is NVIDIA's histogram256 SDK example, modified to compute a 1024-point
// histogram
///////////////////////////////////////////////////////////////////////////////
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
static __device__ void addData1024(volatile unsigned int* s_WarpHist, unsigned int data, unsigned int threadTag)
{
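// Software shared-memory atomic: the high bits of each bin carry the writer's thread tag, so when several threads of a
// warp hit the same bin only one tagged write survives per pass and the losers retry until their increment sticks.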
unsigned int count;
do {
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
}
static __global__ void performHistogram(unsigned int* d_Result, float* d_Data, float minimum, float maximum, int dataN, int cursor)
{
//Current global thread index
const int globalTid = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, gridDim.x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
volatile __shared__ unsigned int s_Hist[BLOCK_MEMORY];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for (int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
int pos = globalTid + cursor * numThreads;
{
int result = pos < dataN;
if (result) {
unsigned int data4 = ((d_Data[pos] - minimum) / (maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for (int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x) {
unsigned int sum = 0;
for (int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
return;
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
unsigned int* d_Result1024;
//Internal memory allocation
void initHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(cudaMalloc((void**)&d_Result1024, HISTOGRAM_SIZE));
}
//Internal memory deallocation
void closeHistogram1024(void)
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(cudaFree(d_Result1024));
}
//histogram1024 CPU front-end
void histogram1024GPU(
Context* ctx,
unsigned int* h_Result,
float* d_Data,
float minimum,
float maximum,
int dataN)
{
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(cudaMemset(d_Result1024, 0, HISTOGRAM_SIZE));
}
int times = dataN / (BLOCK_N * THREAD_N);
if ((times * (BLOCK_N * THREAD_N)) < dataN) {
times += 1;
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Kernel> scope;
for (int i = 0; i < times; ++i) {
performHistogram<<<dim3(BLOCK_N), dim3(THREAD_N)>>>(d_Result1024, d_Data, minimum, maximum, dataN, i);
}
cudaThreadSynchronize();
}
{
gloop::Statistics::Scope<gloop::Statistics::Type::Copy> scope;
checkCudaErrors(cudaMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, cudaMemcpyDeviceToHost));
}
}
|
d42d75b3f36880728aadb98ad69e8907d14c73da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
const int N = 200;
const int M = 200;
const double Niter = 1000;
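// One Jacobi sweep: one block per interior row and one thread per interior column; each call averages the four neighbours from d_in into d_out.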
__global__ void Laplace(float* d_out, float* d_in) {
int rowID = blockIdx.x + 1;
int colID = threadIdx.x + 1;
int pos = rowID * (blockDim.x + 2) + colID;
d_out[pos] = (d_in[pos - 1] + d_in[pos + 1] +
d_in[pos - blockDim.x - 2] + d_in[pos + blockDim.x + 2]) / 4.;
}
int main(int argc, char** argv) {
size_t counter = 0;
FILE* writefile;
writefile = fopen("out.txt", "w");
const int gridSize = (N + 2) * (M + 2);
const int ARRAY_BYTES = gridSize * sizeof(float);
float* T_new = new float[gridSize];
float* T_old = new float[gridSize];
int i, j;
for (i = 0; i < gridSize; i ++) {
T_new[i] = 0;
T_old[i] = 0;
}
for (i = 1; i < M + 1; i ++) {
T_new[i] = 1.;
T_old[i] = 1.;
}
float* d_in;
float* d_out;
hipMalloc((void**)&d_in, ARRAY_BYTES);
hipMalloc((void**)&d_out, ARRAY_BYTES);
hipMemcpy(d_in, T_old, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_out, T_new, ARRAY_BYTES, hipMemcpyHostToDevice);
while (counter < Niter) {
hipLaunchKernelGGL(( Laplace), dim3(N), dim3(M), 0, 0, d_out, d_in);
hipLaunchKernelGGL(( Laplace), dim3(N), dim3(M), 0, 0, d_in, d_out);
counter += 2;
}
hipMemcpy(T_new, d_in, ARRAY_BYTES, hipMemcpyDeviceToHost);
for (i = 0; i < M + 1; i ++) {
for (j = 1; j < M + 1; j ++) {
fprintf(writefile, "%.6f ", T_new[i * (M + 2) + j]);
}
fprintf(writefile, "\n");
}
fclose(writefile);
return 0;
}
| d42d75b3f36880728aadb98ad69e8907d14c73da.cu | #include <stdio.h>
#include <math.h>
const int N = 200;
const int M = 200;
const double Niter = 1000;
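// One Jacobi sweep: one block per interior row and one thread per interior column; each call averages the four neighbours from d_in into d_out.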
__global__ void Laplace(float* d_out, float* d_in) {
int rowID = blockIdx.x + 1;
int colID = threadIdx.x + 1;
int pos = rowID * (blockDim.x + 2) + colID;
d_out[pos] = (d_in[pos - 1] + d_in[pos + 1] +
d_in[pos - blockDim.x - 2] + d_in[pos + blockDim.x + 2]) / 4.;
}
int main(int argc, char** argv) {
size_t counter = 0;
FILE* writefile;
writefile = fopen("out.txt", "w");
const int gridSize = (N + 2) * (M + 2);
const int ARRAY_BYTES = gridSize * sizeof(float);
float* T_new = new float[gridSize];
float* T_old = new float[gridSize];
int i, j;
for (i = 0; i < gridSize; i ++) {
T_new[i] = 0;
T_old[i] = 0;
}
for (i = 1; i < M + 1; i ++) {
T_new[i] = 1.;
T_old[i] = 1.;
}
float* d_in;
float* d_out;
cudaMalloc((void**)&d_in, ARRAY_BYTES);
cudaMalloc((void**)&d_out, ARRAY_BYTES);
cudaMemcpy(d_in, T_old, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, T_new, ARRAY_BYTES, cudaMemcpyHostToDevice);
while (counter < Niter) {
Laplace<<<N, M>>>(d_out, d_in);
Laplace<<<N, M>>>(d_in, d_out);
counter += 2;
}
cudaMemcpy(T_new, d_in, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for (i = 0; i < M + 1; i ++) {
for (j = 1; j < M + 1; j ++) {
fprintf(writefile, "%.6f ", T_new[i * (M + 2) + j]);
}
fprintf(writefile, "\n");
}
fclose(writefile);
return 0;
}
|
8f7a2c600b0bfce46a9c3a528b38df9a55d2334d.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/all.h>
#include <torch/python.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <ATen/hip/Atomic.cuh>
template <typename scalar_t>
__device__ __forceinline__ scalar_t bilinear_func(const scalar_t * value, const scalar_t xx, const scalar_t yy, const int x_lower, const int y_lower, const int W){
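// Standard bilinear interpolation: v1..v4 are the four surrounding values (row stride W) and w1..w4 their area weights.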
const scalar_t diff_x = xx - x_lower;
const scalar_t diff_y = yy - y_lower;
const scalar_t v1 = *(value);
const scalar_t v2 = *(value+1);
const scalar_t v3 = *(value+W);
const scalar_t v4 = *(value+W+1);
const scalar_t w1 = (1 - diff_x) * (1 - diff_y);
const scalar_t w2 = diff_x * (1 - diff_y);
const scalar_t w3 = (1 - diff_x) * diff_y;
const scalar_t w4 = diff_x * diff_y;
return v1 * w1 + v2 * w2 + v3 * w3 + v4 * w4;
}
template <typename scalar_t>
__global__ void dcnv3_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> value,
const torch::PackedTensorAccessor32<scalar_t, 6, torch::RestrictPtrTraits> grid,
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> out,
const int H, const int W, const int Ho, const int Wo, const int Pmax){
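// One thread block per (channel, group, batch) triple; threads stride over output pixels and accumulate the Pmax weighted,
// bilinearly sampled values (samples outside the valid range contribute nothing).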
const int C = blockIdx.x;
const int G = blockIdx.y;
const int B = blockIdx.z;
const int n_thread = threadIdx.x;
const int thread_stride = blockDim.x;
int x,y,i,j;
scalar_t v;
for (i=n_thread; i<Ho*Wo; i=i+thread_stride){
x = i % Wo;
y = i / Wo;
v = 0;
const scalar_t * grid_ptr = & grid[B][G][y][x][0][0];
const scalar_t * w_ptr = & weight[B][G][y][x][0];
for (j=0; j<Pmax; j++){
const scalar_t xx = *(grid_ptr+j*2) * (W-1);
const scalar_t yy = *(grid_ptr+j*2+1) * (H-1);
if (xx>=0 && xx<W-1 && yy>=0 && yy<H-1){
const int y_lower = floor(yy);
const int x_lower = floor(xx);
v += *(w_ptr + j) * bilinear_func(&value[B][G][C][y_lower][x_lower], xx, yy, x_lower, y_lower, W);
// printf("%d / %d %f\n",j, Pmax,v);
}
}
*&out[B][G][C][y][x] = v;
}
}
torch::Tensor dcnv3_cuda(const torch::Tensor value, const torch::Tensor grid, const torch::Tensor weight){
// value shape: [B,G,C,H,W]
// grid shape: [B,Ho,Wo,G,P,2]
// weight shape: [B,Ho,Wo,G,P]
const int B = value.size(0);
const int G = value.size(1);
const int C = value.size(2);
const int Hin = value.size(3);
const int Win = value.size(4);
const int Hout = grid.size(2);
const int Wout = grid.size(3);
const int P = grid.size(4);
const int threads = 1024;
const dim3 blocks(C,G,B);
auto out = torch::zeros({B,G,C,Hout,Wout}, value.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "generate grid sample", (
[&]{hipLaunchKernelGGL((dcnv3_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
value.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
grid.packed_accessor32<scalar_t, 6, torch::RestrictPtrTraits>(),
weight.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
out.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
Hin, Win, Hout, Wout, P);
}
)
);
return out;
}
template <typename scalar_t>
__device__ __forceinline__ void bilinear_back(const scalar_t * value, const scalar_t weight, const scalar_t grad,
scalar_t * value_grad, scalar_t * grid_grad, scalar_t * weight_grad,
const scalar_t xx, const scalar_t yy, const int x_lower, const int y_lower, const int H, const int W){
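// Scatters three gradients with atomics: into the four corner values (grad * bilinear weight * modulation weight), into the
// normalized sampling location (bilinear-weight derivatives scaled by W-1 / H-1), and into the modulation weight (grad * interpolated value).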
const scalar_t diff_x = xx - x_lower;
const scalar_t diff_y = yy - y_lower;
const scalar_t v1 = *(value);
const scalar_t v2 = *(value+1);
const scalar_t v3 = *(value+W);
const scalar_t v4 = *(value+W+1);
const scalar_t w1 = (1 - diff_x) * (1 - diff_y);
const scalar_t w2 = diff_x * (1 - diff_y);
const scalar_t w3 = (1 - diff_x) * diff_y;
const scalar_t w4 = diff_x * diff_y;
gpuAtomicAdd(value_grad, grad*w1*weight);
gpuAtomicAdd(value_grad+1, grad*w2*weight);
gpuAtomicAdd(value_grad+W, grad*w3*weight);
gpuAtomicAdd(value_grad+W+1, grad*w4*weight);
gpuAtomicAdd(grid_grad, weight*grad*(W-1)*(-v1*(1-diff_y) + v2*(1-diff_y) - v3*diff_y + v4*diff_y));
gpuAtomicAdd(grid_grad+1, weight*grad*(H-1)*(-v1*(1-diff_x) - v2*diff_x + v3*(1-diff_x) + v4*diff_x));
gpuAtomicAdd(weight_grad, grad*(v1*w1 + v2*w2 + v3*w3 + v4*w4));
}
template <typename scalar_t>
__global__ void dcnv3_backward_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t,5> value,
const torch::PackedTensorAccessor32<scalar_t,6> grid,
const torch::PackedTensorAccessor32<scalar_t,5> weight,
const torch::PackedTensorAccessor32<scalar_t,5> grad_out,
torch::PackedTensorAccessor32<scalar_t, 5> value_grad,
torch::PackedTensorAccessor32<scalar_t, 6> grid_grad,
torch::PackedTensorAccessor32<scalar_t, 5> weight_grad,
const int H, const int W, const int Ho, const int Wo, const int Pmax){
const int C = blockIdx.x;
const int G = blockIdx.y;
const int B = blockIdx.z;
const int n_thread = threadIdx.x;
const int thread_stride = blockDim.x;
int x,y,i;
// __shared__ scalar_t cache_grid_grad[Ho*Wo*2];
// __shared__ scalar_t cache_weight_grad[Ho*Wo*2];
// for (int i=n_thread; i<Ho*Wo; i+=thread_stride){
// cache_grid_grad[i*2] = 0;
// cache_grid_grad[i*2+1] = 0;
// cache_weight_grad[i] = 0;
// }
// __syncthreads();
for (i=n_thread; i<Ho*Wo; i+=thread_stride){
x = i / Wo;
y = i % Wo; // x and y are swapped here to avoid concurrent writes to the same memory address
const scalar_t * grid_ptr = & grid[B][G][y][x][0][0];
const scalar_t grad_pix = grad_out[B][G][C][y][x];
for (int j=0;j<Pmax;j++){
const scalar_t xx = *(grid_ptr+j*2) * (W-1);
const scalar_t yy = *(grid_ptr+j*2+1) * (H-1);
if (xx>=0 && xx<W-1 && yy>=0 && yy<H-1){
const int y_lower = floor(yy);
const int x_lower = floor(xx);
bilinear_back(&value[B][G][C][y_lower][x_lower],
weight[B][G][y][x][j],
grad_pix,
&value_grad[B][G][C][y_lower][x_lower],
&grid_grad[B][G][y][x][j][0], //&cache_grid_grad[2*i], //
&weight_grad[B][G][y][x][j], //&cache_weight_grad[i], //
xx, yy, x_lower, y_lower, H, W);
}
}
}
// __syncthreads();
// for (i=0; i<Ho*Wo; i+=thread_stride){
// }
}
std::vector<torch::Tensor> dcnv3_backward_cuda(const torch::Tensor value, const torch::Tensor grid, const torch::Tensor weight, const torch::Tensor grad_out){
const int B = value.size(0);
const int G = value.size(1);
const int C = value.size(2);
const int Hin = value.size(3);
const int Win = value.size(4);
const int Hout = grid.size(2);
const int Wout = grid.size(3);
const int P = grid.size(4);
auto value_grad = torch::zeros_like(value);
auto grid_grad = torch::zeros_like(grid);
auto weight_grad = torch::zeros_like(weight);
const int threads = min(1024, Hout*Wout);
const dim3 blocks(C,G,B);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "grid sample backward", (
[&]{hipLaunchKernelGGL((dcnv3_backward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
value.packed_accessor32<scalar_t, 5>(),
grid.packed_accessor32<scalar_t, 6>(),
weight.packed_accessor32<scalar_t, 5>(),
grad_out.packed_accessor32<scalar_t, 5>(),
value_grad.packed_accessor32<scalar_t, 5>(),
grid_grad.packed_accessor32<scalar_t, 6>(),
weight_grad.packed_accessor32<scalar_t, 5>(),
Hin, Win, Hout, Wout, P);
}
)
);
return {value_grad, grid_grad, weight_grad};
}
| 8f7a2c600b0bfce46a9c3a528b38df9a55d2334d.cu | #include <torch/all.h>
#include <torch/python.h>
#include <cuda.h>
#include <vector>
#include <ATen/cuda/Atomic.cuh>
template <typename scalar_t>
__device__ __forceinline__ scalar_t bilinear_func(const scalar_t * value, const scalar_t xx, const scalar_t yy, const int x_lower, const int y_lower, const int W){
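// Standard bilinear interpolation: v1..v4 are the four surrounding values (row stride W) and w1..w4 their area weights.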
const scalar_t diff_x = xx - x_lower;
const scalar_t diff_y = yy - y_lower;
const scalar_t v1 = *(value);
const scalar_t v2 = *(value+1);
const scalar_t v3 = *(value+W);
const scalar_t v4 = *(value+W+1);
const scalar_t w1 = (1 - diff_x) * (1 - diff_y);
const scalar_t w2 = diff_x * (1 - diff_y);
const scalar_t w3 = (1 - diff_x) * diff_y;
const scalar_t w4 = diff_x * diff_y;
return v1 * w1 + v2 * w2 + v3 * w3 + v4 * w4;
}
template <typename scalar_t>
__global__ void dcnv3_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> value,
const torch::PackedTensorAccessor32<scalar_t, 6, torch::RestrictPtrTraits> grid,
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> out,
const int H, const int W, const int Ho, const int Wo, const int Pmax){
const int C = blockIdx.x;
const int G = blockIdx.y;
const int B = blockIdx.z;
const int n_thread = threadIdx.x;
const int thread_stride = blockDim.x;
int x,y,i,j;
scalar_t v;
for (i=n_thread; i<Ho*Wo; i=i+thread_stride){
x = i % Wo;
y = i / Wo;
v = 0;
const scalar_t * grid_ptr = & grid[B][G][y][x][0][0];
const scalar_t * w_ptr = & weight[B][G][y][x][0];
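// Accumulate the weighted bilinear samples of the Pmax offset points; points outside the valid range are skipped.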
for (j=0; j<Pmax; j++){
const scalar_t xx = *(grid_ptr+j*2) * (W-1);
const scalar_t yy = *(grid_ptr+j*2+1) * (H-1);
if (xx>=0 && xx<W-1 && yy>=0 && yy<H-1){
const int y_lower = floor(yy);
const int x_lower = floor(xx);
v += *(w_ptr + j) * bilinear_func(&value[B][G][C][y_lower][x_lower], xx, yy, x_lower, y_lower, W);
// printf("%d / %d %f\n",j, Pmax,v);
}
}
*&out[B][G][C][y][x] = v;
}
}
torch::Tensor dcnv3_cuda(const torch::Tensor value, const torch::Tensor grid, const torch::Tensor weight){
// value shape: [B,G,C,H,W]
// grid shape: [B,Ho,Wo,G,P,2]
// weight shape: [B,Ho,Wo,G,P]
const int B = value.size(0);
const int G = value.size(1);
const int C = value.size(2);
const int Hin = value.size(3);
const int Win = value.size(4);
const int Hout = grid.size(2);
const int Wout = grid.size(3);
const int P = grid.size(4);
const int threads = 1024;
const dim3 blocks(C,G,B);
auto out = torch::zeros({B,G,C,Hout,Wout}, value.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "generate grid sample", (
[&]{dcnv3_cuda_kernel<scalar_t><<<blocks, threads>>>(
value.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
grid.packed_accessor32<scalar_t, 6, torch::RestrictPtrTraits>(),
weight.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
out.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
Hin, Win, Hout, Wout, P);
}
)
);
return out;
}
template <typename scalar_t>
__device__ __forceinline__ void bilinear_back(const scalar_t * value, const scalar_t weight, const scalar_t grad,
scalar_t * value_grad, scalar_t * grid_grad, scalar_t * weight_grad,
const scalar_t xx, const scalar_t yy, const int x_lower, const int y_lower, const int H, const int W){
const scalar_t diff_x = xx - x_lower;
const scalar_t diff_y = yy - y_lower;
const scalar_t v1 = *(value);
const scalar_t v2 = *(value+1);
const scalar_t v3 = *(value+W);
const scalar_t v4 = *(value+W+1);
const scalar_t w1 = (1 - diff_x) * (1 - diff_y);
const scalar_t w2 = diff_x * (1 - diff_y);
const scalar_t w3 = (1 - diff_x) * diff_y;
const scalar_t w4 = diff_x * diff_y;
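// Scatter gradients: d/d(value) uses the bilinear weights, d/d(grid) uses the analytic derivative of the interpolation
// scaled by (W-1) or (H-1) because the grid is stored in normalized coordinates, and d/d(weight) is the interpolated value.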
gpuAtomicAdd(value_grad, grad*w1*weight);
gpuAtomicAdd(value_grad+1, grad*w2*weight);
gpuAtomicAdd(value_grad+W, grad*w3*weight);
gpuAtomicAdd(value_grad+W+1, grad*w4*weight);
gpuAtomicAdd(grid_grad, weight*grad*(W-1)*(-v1*(1-diff_y) + v2*(1-diff_y) - v3*diff_y + v4*diff_y));
gpuAtomicAdd(grid_grad+1, weight*grad*(H-1)*(-v1*(1-diff_x) - v2*diff_x + v3*(1-diff_x) + v4*diff_x));
gpuAtomicAdd(weight_grad, grad*(v1*w1 + v2*w2 + v3*w3 + v4*w4));
}
template <typename scalar_t>
__global__ void dcnv3_backward_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t,5> value,
const torch::PackedTensorAccessor32<scalar_t,6> grid,
const torch::PackedTensorAccessor32<scalar_t,5> weight,
const torch::PackedTensorAccessor32<scalar_t,5> grad_out,
torch::PackedTensorAccessor32<scalar_t, 5> value_grad,
torch::PackedTensorAccessor32<scalar_t, 6> grid_grad,
torch::PackedTensorAccessor32<scalar_t, 5> weight_grad,
const int H, const int W, const int Ho, const int Wo, const int Pmax){
const int C = blockIdx.x;
const int G = blockIdx.y;
const int B = blockIdx.z;
const int n_thread = threadIdx.x;
const int thread_stride = blockDim.x;
int x,y,i;
// __shared__ scalar_t cache_grid_grad[Ho*Wo*2];
// __shared__ scalar_t cache_weight_grad[Ho*Wo*2];
// for (int i=n_thread; i<Ho*Wo; i+=thread_stride){
// cache_grid_grad[i*2] = 0;
// cache_grid_grad[i*2+1] = 0;
// cache_weight_grad[i] = 0;
// }
// __syncthreads();
for (i=n_thread; i<Ho*Wo; i+=thread_stride){
x = i / Wo;
y = i % Wo; // swap x and y here, avoid concurrent write to the same memory address
const scalar_t * grid_ptr = & grid[B][G][y][x][0][0];
const scalar_t grad_pix = grad_out[B][G][C][y][x];
for (int j=0;j<Pmax;j++){
const scalar_t xx = *(grid_ptr+j*2) * (W-1);
const scalar_t yy = *(grid_ptr+j*2+1) * (H-1);
if (xx>=0 && xx<W-1 && yy>=0 && yy<H-1){
const int y_lower = floor(yy);
const int x_lower = floor(xx);
bilinear_back(&value[B][G][C][y_lower][x_lower],
weight[B][G][y][x][j],
grad_pix,
&value_grad[B][G][C][y_lower][x_lower],
&grid_grad[B][G][y][x][j][0], //&cache_grid_grad[2*i], //
&weight_grad[B][G][y][x][j], //&cache_weight_grad[i], //
xx, yy, x_lower, y_lower, H, W);
}
}
}
// __syncthreads();
// for (i=0; i<Ho*Wo; i+=thread_stride){
// }
}
std::vector<torch::Tensor> dcnv3_backward_cuda(const torch::Tensor value, const torch::Tensor grid, const torch::Tensor weight, const torch::Tensor grad_out){
const int B = value.size(0);
const int G = value.size(1);
const int C = value.size(2);
const int Hin = value.size(3);
const int Win = value.size(4);
const int Hout = grid.size(2);
const int Wout = grid.size(3);
const int P = grid.size(4);
auto value_grad = torch::zeros_like(value);
auto grid_grad = torch::zeros_like(grid);
auto weight_grad = torch::zeros_like(weight);
const int threads = min(1024, Hout*Wout);
const dim3 blocks(C,G,B);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "grid sample backward", (
[&]{dcnv3_backward_cuda_kernel<scalar_t><<<blocks, threads>>>(
value.packed_accessor32<scalar_t, 5>(),
grid.packed_accessor32<scalar_t, 6>(),
weight.packed_accessor32<scalar_t, 5>(),
grad_out.packed_accessor32<scalar_t, 5>(),
value_grad.packed_accessor32<scalar_t, 5>(),
grid_grad.packed_accessor32<scalar_t, 6>(),
weight_grad.packed_accessor32<scalar_t, 5>(),
Hin, Win, Hout, Wout, P);
}
)
);
return {value_grad, grid_grad, weight_grad};
}
|
34abc3e950173fe982db0fd16754b4610be7f5e0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Three dimensional Matrix Multiplication using cublas
*
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0], prhs[1] := mxGPUArray or CPU Array}
* gpuArray output, C=MM3D_CUBLAS(A,B,alpha) C=A*B*alpha.
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "3DMultiplicationCUBlas.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <rocblas.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, const int line)
{
if (code != hipSuccess)
{
//fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), __FILE__, __LINE__);
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(code));
hipDeviceReset();
mexErrMsgIdAndTxt( "MATLAB:mexatexit:fatal", "check the memory and process usage");
}
}
void ThreeDMultiplicationCUBlas(int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows, int numCColumns,
int batch_count,
float **A,
float **B,
float **C,
float alpha,
float beta){
hipblasHandle_t handle;
hipblasCreate(&handle);
// Create host pointer array to device matrix storage
float **d_A, **d_B, **d_C, **h_d_A, **h_d_B, **h_d_C;
h_d_A = (float**)malloc(batch_count*sizeof(float*));
h_d_B = (float**)malloc(batch_count*sizeof(float*));
h_d_C = (float**)malloc(batch_count*sizeof(float*));
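// One device matrix per batch entry; the host arrays h_d_* hold the resulting device pointers.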
for (int i = 0; i<batch_count; i++) {
//hipMalloc((void**)&h_d_A[i], dim*dim*sizeof(float));
//hipMalloc((void**)&h_d_B[i], dim*dim*sizeof(float));
//hipMalloc((void**)&h_d_C[i], dim*dim*sizeof(float));
hipMalloc((void**)&h_d_A[i], numARows*numAColumns*sizeof(float));
hipMalloc((void**)&h_d_B[i], numBRows*numBColumns*sizeof(float));
hipMalloc((void**)&h_d_C[i], numCRows*numCColumns*sizeof(float));
}
// Copy the host array of device pointers to the device
hipMalloc((void**)&d_A, batch_count*sizeof(float*));
hipMalloc((void**)&d_B, batch_count*sizeof(float*));
hipMalloc((void**)&d_C, batch_count*sizeof(float*));
hipMemcpy(d_A, h_d_A, batch_count*sizeof(float*), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_d_B, batch_count*sizeof(float*), hipMemcpyHostToDevice);
hipMemcpy(d_C, h_d_C, batch_count*sizeof(float*), hipMemcpyHostToDevice);
for (int i = 0; i<batch_count; i++) {
//hipblasSetMatrix(dim, dim, sizeof(float), A[i], dim, h_d_A[i], dim);
//hipblasSetMatrix(dim, dim, sizeof(float), B[i], dim, h_d_B[i], dim);
//hipblasSetMatrix(dim, dim, sizeof(float), C[i], dim, h_d_C[i], dim);
//stat = hipblasSetMatrix(m, n, sizeof (*a), a, m, d_a, m); /
//# define m 5 // number of rows
// # define n 6 // number of columns
hipblasSetMatrix(numARows, numAColumns, sizeof(float), A[i], numARows, h_d_A[i], numARows);
hipblasSetMatrix(numBRows, numBColumns, sizeof(float), B[i], numBRows, h_d_B[i], numBRows);
hipblasSetMatrix(numCRows, numCColumns, sizeof(float), C[i], numCRows, h_d_C[i], numCRows);
}
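// Batched GEMM: C[i] = alpha * A[i] * B[i] + beta * C[i] for every batch entry (column-major storage).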
hipblasSgemmBatched(handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
numARows, numBColumns, numAColumns,
&alpha,
(const float**)d_A, numARows,
(const float**)d_B, numBRows,
&beta,
d_C, numCRows,
batch_count);
for (int i = 0; i < batch_count; i++){
hipblasGetMatrix(numARows, numBColumns, sizeof(float), h_d_C[i], numARows, C[i], numARows);
}
for (int i = 0; i<batch_count; i++) {
//free(A[i]);
//free(B[i]);
//free(C[i]);
hipFree(h_d_A[i]);
hipFree(h_d_B[i]);
hipFree(h_d_C[i]);
}
//free(A);
//free(B);
//free(C);
free(h_d_A);
free(h_d_B);
free(h_d_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipblasDestroy(handle);
return;
}
| 34abc3e950173fe982db0fd16754b4610be7f5e0.cu | /*
* Three dimensional Matrix Multiplication using cublas
*
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0], prhs[1] := mxGPUArray or CPU Array}
* gpuArray output, C=MM3D_CUBLAS(A,B,alpha) C=A*B*alpha.
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "3DMultiplicationCUBlas.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <cublas_v2.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, const int line)
{
if (code != cudaSuccess)
{
//fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), __FILE__, __LINE__);
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(code));
cudaDeviceReset();
mexErrMsgIdAndTxt( "MATLAB:mexatexit:fatal", "check the memory and process usage");
}
}
void ThreeDMultiplicationCUBlas(int numARows,
int numAColumns, int numBRows,
int numBColumns, int numCRows, int numCColumns,
int batch_count,
float **A,
float **B,
float **C,
float alpha,
float beta){
cublasHandle_t handle;
cublasCreate(&handle);
// Create host pointer array to device matrix storage
float **d_A, **d_B, **d_C, **h_d_A, **h_d_B, **h_d_C;
h_d_A = (float**)malloc(batch_count*sizeof(float*));
h_d_B = (float**)malloc(batch_count*sizeof(float*));
h_d_C = (float**)malloc(batch_count*sizeof(float*));
for (int i = 0; i<batch_count; i++) {
//cudaMalloc((void**)&h_d_A[i], dim*dim*sizeof(float));
//cudaMalloc((void**)&h_d_B[i], dim*dim*sizeof(float));
//cudaMalloc((void**)&h_d_C[i], dim*dim*sizeof(float));
cudaMalloc((void**)&h_d_A[i], numARows*numAColumns*sizeof(float));
cudaMalloc((void**)&h_d_B[i], numBRows*numBColumns*sizeof(float));
cudaMalloc((void**)&h_d_C[i], numCRows*numCColumns*sizeof(float));
}
// Copy the host array of device pointers to the device
cudaMalloc((void**)&d_A, batch_count*sizeof(float*));
cudaMalloc((void**)&d_B, batch_count*sizeof(float*));
cudaMalloc((void**)&d_C, batch_count*sizeof(float*));
cudaMemcpy(d_A, h_d_A, batch_count*sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_d_B, batch_count*sizeof(float*), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_d_C, batch_count*sizeof(float*), cudaMemcpyHostToDevice);
for (int i = 0; i<batch_count; i++) {
//cublasSetMatrix(dim, dim, sizeof(float), A[i], dim, h_d_A[i], dim);
//cublasSetMatrix(dim, dim, sizeof(float), B[i], dim, h_d_B[i], dim);
//cublasSetMatrix(dim, dim, sizeof(float), C[i], dim, h_d_C[i], dim);
//stat = cublasSetMatrix(m, n, sizeof (*a), a, m, d_a, m); /
//# define m 5 // number of rows
// # define n 6 // number of columns
cublasSetMatrix(numARows, numAColumns, sizeof(float), A[i], numARows, h_d_A[i], numARows);
cublasSetMatrix(numBRows, numBColumns, sizeof(float), B[i], numBRows, h_d_B[i], numBRows);
cublasSetMatrix(numCRows, numCColumns, sizeof(float), C[i], numCRows, h_d_C[i], numCRows);
}
cublasSgemmBatched(handle,
CUBLAS_OP_N, CUBLAS_OP_N,
numARows, numBColumns, numAColumns,
&alpha,
(const float**)d_A, numARows,
(const float**)d_B, numBRows,
&beta,
d_C, numCRows,
batch_count);
for (int i = 0; i < batch_count; i++){
cublasGetMatrix(numARows, numBColumns, sizeof(float), h_d_C[i], numARows, C[i], numARows);
}
for (int i = 0; i<batch_count; i++) {
//free(A[i]);
//free(B[i]);
//free(C[i]);
cudaFree(h_d_A[i]);
cudaFree(h_d_B[i]);
cudaFree(h_d_C[i]);
}
//free(A);
//free(B);
//free(C);
free(h_d_A);
free(h_d_B);
free(h_d_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cublasDestroy(handle);
return;
}
|
213d4f56a1fb8f5aae34764f3a1f86905c497ab9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cubicLattice.cuh"
#include "functions.h"
/*! \file cubicLattice.cu */
/*!
\addtogroup modelKernels
@{
*/
__global__ void gpu_set_random_spins_kernel(dVec *pos, hiprandState_t *rngs,int N)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
hiprandState_t randState;
randState = rngs[blockIdx.x];
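// Advance the per-block RNG state by threadIdx.x draws so each thread consumes a distinct part of the sequence.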
for (int j =0 ; j < threadIdx.x; ++j)
hiprand(&randState);
for (int dd = 0; dd < DIMENSION; ++dd)
pos[idx][dd] = hiprand_normal(&randState);
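// Normalize the Gaussian draws so each spin is a unit vector (uniformly distributed on the sphere).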
scalar lambda = sqrt(dot(pos[idx],pos[idx]));
pos[idx] = (1/lambda)*pos[idx];
rngs[blockIdx.x] = randState;
return;
};
bool gpu_set_random_spins(dVec *d_pos,
hiprandState_t *rngs,
int blockSize,
int nBlocks,
int N
)
{
cout << "calling gpu spin setting" << endl;
hipLaunchKernelGGL(( gpu_set_random_spins_kernel), dim3(nBlocks),dim3(blockSize), 0, 0, d_pos,rngs,N);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
__global__ void gpu_update_spins_kernel(dVec *d_disp,
dVec *d_pos,
scalar scale,
int N,
bool normalize)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
d_pos[idx] += scale*d_disp[idx];
if(normalize)
{
scalar nrm =norm(d_pos[idx]);
d_pos[idx] = (1.0/nrm)*d_pos[idx];
}
}
__global__ void gpu_update_spins_simple_kernel(dVec *d_disp,
dVec *d_pos,
int N)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
int pidx = idx/DIMENSION;
if(pidx>=N) return;
int didx = idx%DIMENSION;
d_pos[pidx][didx] += d_disp[pidx][didx];
}
bool gpu_update_spins(dVec *d_disp,
dVec *d_pos,
scalar scale,
int N,
bool normalize)
{
unsigned int block_size = 1024;
if (N < 128) block_size = 16;
unsigned int nblocks = N/block_size + 1;
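// Fast path: with no scaling or normalization, each thread updates a single scalar component instead of a whole spin.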
if(!normalize && scale == 1.)
{
nblocks = DIMENSION*N/block_size + 1;
hipLaunchKernelGGL(( gpu_update_spins_simple_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_disp,d_pos,N);
}
else
hipLaunchKernelGGL(( gpu_update_spins_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_disp,d_pos,scale,N,normalize);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
__global__ void gpu_copy_boundary_object_kernel(dVec *pos,
int *sites,
int *neighbors,
pair<int,dVec> *assistStructure,
int *types,
Index2D neighborIndex,
int motionDirection,
bool resetLattice,
int Nsites)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx>=Nsites) return;
int site = sites[idx];
int motionSite = neighbors[neighborIndex(motionDirection,site)];
assistStructure[idx].first = motionSite;
assistStructure[idx].second = pos[site];
if(resetLattice)
types[site] = 0;
return;
}
bool gpu_copy_boundary_object(dVec *pos,int *sites,int *neighbors,pair<int,dVec> *assistStructure,
int *types,Index2D neighborIndex,int motionDirection,bool resetLattice,int Nsites)
{
unsigned int block_size = 512;
if (Nsites < 512) block_size = 16;
unsigned int nblocks = Nsites/block_size + 1;
hipLaunchKernelGGL(( gpu_copy_boundary_object_kernel), dim3(nblocks),dim3(block_size), 0, 0, pos,sites,neighbors,assistStructure,types,
neighborIndex,motionDirection,resetLattice,Nsites);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
__global__ void gpu_move_boundary_object_kernel(dVec *pos,int *sites,pair<int,dVec> *assistStructure,
int *types,int newTypeValue,int Nsites)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx>=Nsites) return;
int site = assistStructure[idx].first;
sites[idx] =site;
pos[site] =assistStructure[idx].second;
types[site] = newTypeValue;
return;
}
bool gpu_move_boundary_object(dVec *pos,int *sites,pair<int,dVec> *assistStructure,
int *types,int newTypeValue,int Nsites)
{
unsigned int block_size = 512;
if (Nsites < 512) block_size = 16;
unsigned int nblocks = Nsites/block_size + 1;
hipLaunchKernelGGL(( gpu_move_boundary_object_kernel), dim3(nblocks),dim3(block_size), 0, 0, pos,sites,assistStructure,types,
newTypeValue,Nsites);
HANDLE_ERROR(hipGetLastError());
return hipSuccess;
}
/** @} */ //end of group declaration
| 213d4f56a1fb8f5aae34764f3a1f86905c497ab9.cu | #include "cubicLattice.cuh"
#include "functions.h"
/*! \file cubicLattice.cu */
/*!
\addtogroup modelKernels
@{
*/
__global__ void gpu_set_random_spins_kernel(dVec *pos, curandState *rngs,int N)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
curandState randState;
randState = rngs[blockIdx.x];
for (int j =0 ; j < threadIdx.x; ++j)
curand(&randState);
for (int dd = 0; dd < DIMENSION; ++dd)
pos[idx][dd] = curand_normal(&randState);
scalar lambda = sqrt(dot(pos[idx],pos[idx]));
pos[idx] = (1/lambda)*pos[idx];
rngs[blockIdx.x] = randState;
return;
};
bool gpu_set_random_spins(dVec *d_pos,
curandState *rngs,
int blockSize,
int nBlocks,
int N
)
{
cout << "calling gpu spin setting" << endl;
gpu_set_random_spins_kernel<<<nBlocks,blockSize>>>(d_pos,rngs,N);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
__global__ void gpu_update_spins_kernel(dVec *d_disp,
dVec *d_pos,
scalar scale,
int N,
bool normalize)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= N)
return;
d_pos[idx] += scale*d_disp[idx];
if(normalize)
{
scalar nrm =norm(d_pos[idx]);
d_pos[idx] = (1.0/nrm)*d_pos[idx];
}
}
__global__ void gpu_update_spins_simple_kernel(dVec *d_disp,
dVec *d_pos,
int N)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
int pidx = idx/DIMENSION;
if(pidx>=N) return;
int didx = idx%DIMENSION;
d_pos[pidx][didx] += d_disp[pidx][didx];
}
bool gpu_update_spins(dVec *d_disp,
dVec *d_pos,
scalar scale,
int N,
bool normalize)
{
unsigned int block_size = 1024;
if (N < 128) block_size = 16;
unsigned int nblocks = N/block_size + 1;
if(!normalize && scale == 1.)
{
nblocks = DIMENSION*N/block_size + 1;
gpu_update_spins_simple_kernel<<<nblocks,block_size>>>(d_disp,d_pos,N);
}
else
gpu_update_spins_kernel<<<nblocks,block_size>>>(d_disp,d_pos,scale,N,normalize);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
__global__ void gpu_copy_boundary_object_kernel(dVec *pos,
int *sites,
int *neighbors,
pair<int,dVec> *assistStructure,
int *types,
Index2D neighborIndex,
int motionDirection,
bool resetLattice,
int Nsites)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx>=Nsites) return;
int site = sites[idx];
int motionSite = neighbors[neighborIndex(motionDirection,site)];
assistStructure[idx].first = motionSite;
assistStructure[idx].second = pos[site];
if(resetLattice)
types[site] = 0;
return;
}
bool gpu_copy_boundary_object(dVec *pos,int *sites,int *neighbors,pair<int,dVec> *assistStructure,
int *types,Index2D neighborIndex,int motionDirection,bool resetLattice,int Nsites)
{
unsigned int block_size = 512;
if (Nsites < 512) block_size = 16;
unsigned int nblocks = Nsites/block_size + 1;
gpu_copy_boundary_object_kernel<<<nblocks,block_size>>>(pos,sites,neighbors,assistStructure,types,
neighborIndex,motionDirection,resetLattice,Nsites);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
__global__ void gpu_move_boundary_object_kernel(dVec *pos,int *sites,pair<int,dVec> *assistStructure,
int *types,int newTypeValue,int Nsites)
{
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx>=Nsites) return;
int site = assistStructure[idx].first;
sites[idx] =site;
pos[site] =assistStructure[idx].second;
types[site] = newTypeValue;
return;
}
bool gpu_move_boundary_object(dVec *pos,int *sites,pair<int,dVec> *assistStructure,
int *types,int newTypeValue,int Nsites)
{
unsigned int block_size = 512;
if (Nsites < 512) block_size = 16;
unsigned int nblocks = Nsites/block_size + 1;
gpu_move_boundary_object_kernel<<<nblocks,block_size>>>(pos,sites,assistStructure,types,
newTypeValue,Nsites);
HANDLE_ERROR(cudaGetLastError());
return cudaSuccess;
}
/** @} */ //end of group declaration
|
Abs.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
template <typename T>
struct absupdateOutput_functor
{
__device__ void operator()(T* output, const T* input) const
{
*output = abs(*input);
}
};
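// Backward: d|x|/dx is -1 for negative inputs and +1 otherwise, applied to the incoming gradient.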
template <typename T>
struct absupdateGradInput_functor
{
__device__ void operator()(T* gradInput, const T* input, const T* gradOutput) const
{
*gradInput = *input < 0 ? - *gradOutput : *gradOutput;
}
};
#include "generic/Abs.cu"
#include "THHGenerateFloatTypes.h"
| Abs.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
template <typename T>
struct absupdateOutput_functor
{
__device__ void operator()(T* output, const T* input) const
{
*output = abs(*input);
}
};
template <typename T>
struct absupdateGradInput_functor
{
__device__ void operator()(T* gradInput, const T* input, const T* gradOutput) const
{
*gradInput = *input < 0 ? - *gradOutput : *gradOutput;
}
};
#include "generic/Abs.cu"
#include "THCGenerateFloatTypes.h"
|
0b833308a546e8162a4e83b2243020c379793174.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <getopt.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
struct dwt {
char * srcFilename;
char * outFilename;
unsigned char *srcImg;
int pixWidth;
int pixHeight;
int components;
int dwtLvls;
};
int getImg(char * srcFilename, unsigned char *srcImg, int inputSize)
{
// printf("Loading ipnput: %s\n", srcFilename);
//char *path = "../../data/dwt2d/";
char *path = "../data/dwt2d/";
char *newSrc = NULL;
if((newSrc = (char *)malloc(strlen(srcFilename)+strlen(path)+1)) != NULL)
{
newSrc[0] = '\0';
strcat(newSrc, path);
strcat(newSrc, srcFilename);
srcFilename= newSrc;
}
printf("Loading ipnput: %s\n", srcFilename);
//srcFilename = strcat("../../data/dwt2d/",srcFilename);
//read image
int i = open(srcFilename, O_RDONLY, 0644);
if (i == -1) {
error(0,errno,"cannot access %s", srcFilename);
return -1;
}
int ret = read(i, srcImg, inputSize);
printf("precteno %d, inputsize %d\n", ret, inputSize);
close(i);
return 0;
}
void usage() {
printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
-d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
-c, --components\t\tnumber of color components, default 3\n\
-b, --depth\t\t\tbit depth, default 8\n\
-l, --level\t\t\tDWT level, default 3\n\
-D, --device\t\t\tcuda device\n\
-f, --forward\t\t\tforward transform\n\
-r, --reverse\t\t\treverse transform\n\
-9, --97\t\t\t9/7 transform\n\
-5, --53\t\t\t5/3 transform\n\
-w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual)
{
int componentSize = d->pixWidth*d->pixHeight*sizeof(T);
T *c_r_out, *backup ;
hipMalloc((void**)&c_r_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r_out, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&backup, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(backup, 0, componentSize);
cudaCheckError("Memset device memory");
if (d->components == 3) {
/* Alloc two more buffers for G and B */
T *c_g_out, *c_b_out;
hipMalloc((void**)&c_g_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_g_out, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_b_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_b_out, 0, componentSize);
cudaCheckError("Memset device memory");
/* Load components */
T *c_r, *c_g, *c_b;
hipMalloc((void**)&c_r, componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_g, componentSize); //< G, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_g, 0, componentSize);
cudaCheckError("Memset device memory");
hipMalloc((void**)&c_b, componentSize); //< B, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_b, 0, componentSize);
cudaCheckError("Memset device memory");
rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
/* Compute DWT and always store into file */
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// -------test----------
// T *h_r_out=(T*)malloc(componentSize);
// hipMemcpy(h_r_out, c_g_out, componentSize, hipMemcpyDeviceToHost);
// int ii;
// for(ii=0;ii<componentSize/sizeof(T);ii++) {
// fprintf(stderr, "%d ", h_r_out[ii]);
// if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
// }
// -------test----------
/* Store DWT to file */
#ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".r");
writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".g");
writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".b");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
}
#endif
hipFree(c_r);
cudaCheckError("Cuda free");
hipFree(c_g);
cudaCheckError("Cuda free");
hipFree(c_b);
cudaCheckError("Cuda free");
hipFree(c_g_out);
cudaCheckError("Cuda free");
hipFree(c_b_out);
cudaCheckError("Cuda free");
}
else if (d->components == 1) {
//Load component
T *c_r;
hipMalloc((void**)&(c_r), componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
hipMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
// Compute DWT
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// Store DWT to file
// #ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".out");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".lin.out");
}
// #endif
hipFree(c_r);
cudaCheckError("Cuda free");
}
hipFree(c_r_out);
cudaCheckError("Cuda free device");
hipFree(backup);
cudaCheckError("Cuda free device");
}
int main(int argc, char **argv)
{
int optindex = 0;
char ch;
struct option longopts[] = {
{"dimension", required_argument, 0, 'd'}, //dimensions of src img
{"components", required_argument, 0, 'c'}, //numger of components of src img
{"depth", required_argument, 0, 'b'}, //bit depth of src img
{"level", required_argument, 0, 'l'}, //level of dwt
{"device", required_argument, 0, 'D'}, //cuda device
{"forward", no_argument, 0, 'f'}, //forward transform
{"reverse", no_argument, 0, 'r'}, //reverse transform
{"97", no_argument, 0, '9'}, //9/7 transform
{"53", no_argument, 0, '5' }, //5/3transform
{"write-visual",no_argument, 0, 'w' }, //write output (subbands) in visual (tiled) order instead of linear
{"help", no_argument, 0, 'h'}
};
int pixWidth = 0; //<real pixWidth
int pixHeight = 0; //<real pixHeight
int compCount = 3; //number of components; 3 for RGB or YUV, 4 for RGBA
int bitDepth = 8;
int dwtLvls = 3; //default numuber of DWT levels
int device = 0;
int forward = 1; //forward transform
int dwt97 = 1; //1=dwt9/7, 0=dwt5/3 transform
int writeVisual = 0; //write output (subbands) in visual (tiled) order instead of linear
char * pos;
while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts, &optindex)) != -1) {
switch (ch) {
case 'd':
pixWidth = atoi(optarg);
pos = strstr(optarg, "x");
if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
usage();
return -1;
}
pixHeight = atoi(pos+1);
break;
case 'c':
compCount = atoi(optarg);
break;
case 'b':
bitDepth = atoi(optarg);
break;
case 'l':
dwtLvls = atoi(optarg);
break;
case 'D':
device = atoi(optarg);
break;
case 'f':
forward = 1;
break;
case 'r':
forward = 0;
break;
case '9':
dwt97 = 1;
break;
case '5':
dwt97 = 0;
break;
case 'w':
writeVisual = 1;
break;
case 'h':
usage();
return 0;
case '?':
return -1;
default :
usage();
return -1;
}
}
argc -= optind;
argv += optind;
if (argc == 0) { // at least one filename is expected
printf("Please supply src file name\n");
usage();
return -1;
}
if (pixWidth <= 0 || pixHeight <=0) {
printf("Wrong or missing dimensions\n");
usage();
return -1;
}
if (forward == 0) {
writeVisual = 0; //do not write visual when RDWT
}
// device init
int devCount;
hipGetDeviceCount(&devCount);
cudaCheckError("Get device count");
if (devCount == 0) {
printf("No CUDA enabled device\n");
return -1;
}
if (device < 0 || device > devCount -1) {
printf("Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
device, 0, devCount -1);
return -1;
}
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, device);
cudaCheckError("Get device properties");
if (devProp.major < 1) {
printf("Device %d does not support CUDA\n", device);
return -1;
}
printf("Using device %d: %s\n", device, devProp.name);
hipSetDevice(device);
cudaCheckError("Set selected device");
struct dwt *d;
d = (struct dwt *)malloc(sizeof(struct dwt));
d->srcImg = NULL;
d->pixWidth = pixWidth;
d->pixHeight = pixHeight;
d->components = compCount;
d->dwtLvls = dwtLvls;
// file names
d->srcFilename = (char *)malloc(strlen(argv[0])+1);
strcpy(d->srcFilename, argv[0]);
if (argc == 1) { // only one filename supplyed
d->outFilename = (char *)malloc(strlen(d->srcFilename)+5);
strcpy(d->outFilename, d->srcFilename);
strcpy(d->outFilename+strlen(d->srcFilename), ".dwt");
} else {
d->outFilename = strdup(argv[1]);
}
//Input review
printf("Source file:\t\t%s\n", d->srcFilename);
printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
printf(" Components count:\t%d\n", compCount);
printf(" Bit depth:\t\t%d\n", bitDepth);
printf(" DWT levels:\t\t%d\n", dwtLvls);
printf(" Forward transform:\t%d\n", forward);
printf(" 9/7 transform:\t\t%d\n", dwt97);
//data sizes
int inputSize = pixWidth*pixHeight*compCount; //<amount of data (in bytes) to proccess
//load img source image
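// Pinned (page-locked) host buffer for the source image speeds up the host-to-device transfer.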
hipHostMalloc((void **)&d->srcImg, inputSize);
cudaCheckError("Alloc host memory");
if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
return -1;
/* DWT */
if (forward == 1) {
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
else { // reverse
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
//writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
//writeComponent(g_wave_cuda, 512000, ".g");
//writeComponent(g_cuda, componentSize, ".g");
//writeComponent(b_wave_cuda, componentSize, ".b");
hipHostFree(d->srcImg);
cudaCheckError("Cuda free host");
return 0;
}
| 0b833308a546e8162a4e83b2243020c379793174.cu | /*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <error.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <getopt.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
struct dwt {
char * srcFilename;
char * outFilename;
unsigned char *srcImg;
int pixWidth;
int pixHeight;
int components;
int dwtLvls;
};
int getImg(char * srcFilename, unsigned char *srcImg, int inputSize)
{
// printf("Loading ipnput: %s\n", srcFilename);
//char *path = "../../data/dwt2d/";
char *path = "../data/dwt2d/";
char *newSrc = NULL;
if((newSrc = (char *)malloc(strlen(srcFilename)+strlen(path)+1)) != NULL)
{
newSrc[0] = '\0';
strcat(newSrc, path);
strcat(newSrc, srcFilename);
srcFilename= newSrc;
}
printf("Loading ipnput: %s\n", srcFilename);
//srcFilename = strcat("../../data/dwt2d/",srcFilename);
//read image
int i = open(srcFilename, O_RDONLY, 0644);
if (i == -1) {
error(0,errno,"cannot access %s", srcFilename);
return -1;
}
int ret = read(i, srcImg, inputSize);
printf("precteno %d, inputsize %d\n", ret, inputSize);
close(i);
return 0;
}
void usage() {
printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
-d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
-c, --components\t\tnumber of color components, default 3\n\
-b, --depth\t\t\tbit depth, default 8\n\
-l, --level\t\t\tDWT level, default 3\n\
-D, --device\t\t\tcuda device\n\
-f, --forward\t\t\tforward transform\n\
-r, --reverse\t\t\treverse transform\n\
-9, --97\t\t\t9/7 transform\n\
-5, --53\t\t\t5/3 transform\n\
-w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual)
{
int componentSize = d->pixWidth*d->pixHeight*sizeof(T);
T *c_r_out, *backup ;
cudaMalloc((void**)&c_r_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r_out, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&backup, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(backup, 0, componentSize);
cudaCheckError("Memset device memory");
if (d->components == 3) {
/* Alloc two more buffers for G and B */
T *c_g_out, *c_b_out;
cudaMalloc((void**)&c_g_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_g_out, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_b_out, componentSize); //< aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_b_out, 0, componentSize);
cudaCheckError("Memset device memory");
/* Load components */
T *c_r, *c_g, *c_b;
cudaMalloc((void**)&c_r, componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_g, componentSize); //< G, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_g, 0, componentSize);
cudaCheckError("Memset device memory");
cudaMalloc((void**)&c_b, componentSize); //< B, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_b, 0, componentSize);
cudaCheckError("Memset device memory");
rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
/* Compute DWT and always store into file */
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// -------test----------
// T *h_r_out=(T*)malloc(componentSize);
// cudaMemcpy(h_r_out, c_g_out, componentSize, cudaMemcpyDeviceToHost);
// int ii;
// for(ii=0;ii<componentSize/sizeof(T);ii++) {
// fprintf(stderr, "%d ", h_r_out[ii]);
// if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
// }
// -------test----------
/* Store DWT to file */
#ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".r");
writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".g");
writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".b");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
}
#endif
cudaFree(c_r);
cudaCheckError("Cuda free");
cudaFree(c_g);
cudaCheckError("Cuda free");
cudaFree(c_b);
cudaCheckError("Cuda free");
cudaFree(c_g_out);
cudaCheckError("Cuda free");
cudaFree(c_b_out);
cudaCheckError("Cuda free");
}
else if (d->components == 1) {
//Load component
T *c_r;
cudaMalloc((void**)&(c_r), componentSize); //< R, aligned component size
cudaCheckError("Alloc device memory");
cudaMemset(c_r, 0, componentSize);
cudaCheckError("Memset device memory");
bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
// Compute DWT
nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls, forward);
// Store DWT to file
// #ifdef OUTPUT
if (writeVisual) {
writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls, d->outFilename, ".out");
} else {
writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".lin.out");
}
// #endif
cudaFree(c_r);
cudaCheckError("Cuda free");
}
cudaFree(c_r_out);
cudaCheckError("Cuda free device");
cudaFree(backup);
cudaCheckError("Cuda free device");
}
int main(int argc, char **argv)
{
int optindex = 0;
char ch;
struct option longopts[] = {
{"dimension", required_argument, 0, 'd'}, //dimensions of src img
{"components", required_argument, 0, 'c'}, //numger of components of src img
{"depth", required_argument, 0, 'b'}, //bit depth of src img
{"level", required_argument, 0, 'l'}, //level of dwt
{"device", required_argument, 0, 'D'}, //cuda device
{"forward", no_argument, 0, 'f'}, //forward transform
{"reverse", no_argument, 0, 'r'}, //reverse transform
{"97", no_argument, 0, '9'}, //9/7 transform
{"53", no_argument, 0, '5' }, //5/3transform
{"write-visual",no_argument, 0, 'w' }, //write output (subbands) in visual (tiled) order instead of linear
{"help", no_argument, 0, 'h'}
};
int pixWidth = 0; //<real pixWidth
int pixHeight = 0; //<real pixHeight
int compCount = 3; //number of components; 3 for RGB or YUV, 4 for RGBA
int bitDepth = 8;
int dwtLvls = 3; //default numuber of DWT levels
int device = 0;
int forward = 1; //forward transform
int dwt97 = 1; //1=dwt9/7, 0=dwt5/3 transform
int writeVisual = 0; //write output (subbands) in visual (tiled) order instead of linear
char * pos;
while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts, &optindex)) != -1) {
switch (ch) {
case 'd':
pixWidth = atoi(optarg);
pos = strstr(optarg, "x");
if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
usage();
return -1;
}
pixHeight = atoi(pos+1);
break;
case 'c':
compCount = atoi(optarg);
break;
case 'b':
bitDepth = atoi(optarg);
break;
case 'l':
dwtLvls = atoi(optarg);
break;
case 'D':
device = atoi(optarg);
break;
case 'f':
forward = 1;
break;
case 'r':
forward = 0;
break;
case '9':
dwt97 = 1;
break;
case '5':
dwt97 = 0;
break;
case 'w':
writeVisual = 1;
break;
case 'h':
usage();
return 0;
case '?':
return -1;
default :
usage();
return -1;
}
}
argc -= optind;
argv += optind;
if (argc == 0) { // at least one filename is expected
printf("Please supply src file name\n");
usage();
return -1;
}
if (pixWidth <= 0 || pixHeight <=0) {
printf("Wrong or missing dimensions\n");
usage();
return -1;
}
if (forward == 0) {
writeVisual = 0; //do not write visual when RDWT
}
// device init
int devCount;
cudaGetDeviceCount(&devCount);
cudaCheckError("Get device count");
if (devCount == 0) {
printf("No CUDA enabled device\n");
return -1;
}
if (device < 0 || device > devCount -1) {
printf("Selected device %d is out of bound. Devices on your system are in range %d - %d\n",
device, 0, devCount -1);
return -1;
}
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, device);
cudaCheckError("Get device properties");
if (devProp.major < 1) {
printf("Device %d does not support CUDA\n", device);
return -1;
}
printf("Using device %d: %s\n", device, devProp.name);
cudaSetDevice(device);
cudaCheckError("Set selected device");
struct dwt *d;
d = (struct dwt *)malloc(sizeof(struct dwt));
d->srcImg = NULL;
d->pixWidth = pixWidth;
d->pixHeight = pixHeight;
d->components = compCount;
d->dwtLvls = dwtLvls;
// file names
d->srcFilename = (char *)malloc(strlen(argv[0])+1);
strcpy(d->srcFilename, argv[0]);
if (argc == 1) { // only one filename supplyed
d->outFilename = (char *)malloc(strlen(d->srcFilename)+5);
strcpy(d->outFilename, d->srcFilename);
strcpy(d->outFilename+strlen(d->srcFilename), ".dwt");
} else {
d->outFilename = strdup(argv[1]);
}
//Input review
printf("Source file:\t\t%s\n", d->srcFilename);
printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
printf(" Components count:\t%d\n", compCount);
printf(" Bit depth:\t\t%d\n", bitDepth);
printf(" DWT levels:\t\t%d\n", dwtLvls);
printf(" Forward transform:\t%d\n", forward);
printf(" 9/7 transform:\t\t%d\n", dwt97);
//data sizes
int inputSize = pixWidth*pixHeight*compCount; //<amount of data (in bytes) to proccess
//load img source image
cudaMallocHost((void **)&d->srcImg, inputSize);
cudaCheckError("Alloc host memory");
if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
return -1;
/* DWT */
if (forward == 1) {
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
else { // reverse
if(dwt97 == 1 )
processDWT<float>(d, forward, writeVisual);
else // 5/3
processDWT<int>(d, forward, writeVisual);
}
//writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
//writeComponent(g_wave_cuda, 512000, ".g");
//writeComponent(g_cuda, componentSize, ".g");
//writeComponent(b_wave_cuda, componentSize, ".b");
cudaFreeHost(d->srcImg);
cudaCheckError("Cuda free host");
return 0;
}
|
b1b7c352200c9482e9ee79ddca1614861b88ad4b.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
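// Elementwise dispatch: prefer the jiterator path (kernel JIT-compiled from the bessel string) when enabled,
// otherwise fall back to a precompiled gpu_kernel lambda.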
void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda);
} // namespace at::native
| b1b7c352200c9482e9ee79ddca1614861b88ad4b.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
constexpr char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda);
} // namespace at::native
|
b4350d5a618dfced37ac197d3aa43fdc36a0554d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <chrono>
__global__
void multiplyCell(int N, int * a, int * b, int * c){
// We get the index of the current data
unsigned int threadx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int thready = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int threadxy = thready * N + threadx;
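// Linearize the 2D thread coordinates into one output index (assumes the launch's x extent covers a full row of width N).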
// Then we get the col and row
int row = threadxy / N;
int col = threadxy % N;
if(row < N && col < N){
// Then we multiply and add each one of them
int result = 0;
for(int i=0;i<N;i++){
result +=a[row*N+i]*b[i*N+col];
}
c[threadxy]=result;
}
}
void GPUTimedMatrixMultiplication(int N,int * a,int * b, int * c,
int ** runs, int runsLength){
// Allocate in GPU
int *d_a,*d_b,*d_c;
int size = N*N*sizeof(int);
hipMalloc(&d_a,size);
hipMalloc(&d_b,size);
hipMalloc(&d_c,size);
// Transfer to device
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);
// Call kernel with the blocks, grid and threads specified
for(int i=0;i<runsLength;i++){
int * run = runs[i];
dim3 blocksPerGrid(run[0],run[1],run[2]);
dim3 threadsPerBlock(run[3],run[4],run[5]);
//initialize timer
auto start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiplyCell), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, N,d_a,d_b,d_c);
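// Kernel launches are asynchronous, so this interval mostly measures launch overhead unless a device synchronize is added before stopping the timer.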
//finish timer
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end - start;
//print result
printf("GPU test dimensions threads %d %d blocks %d %d N: %d duration: %f\n ms\n",
run[0],run[1],run[3],run[4],N,duration_ms.count());
}
// Copy result back from gpu
hipMemcpy(c,d_c,size,hipMemcpyDeviceToHost);
// Free variables
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
void GPUMatrixMultiplication(int N,int * a,int * b, int * c,
int * run){
// Allocate in GPU
int *d_a,*d_b,*d_c;
int size = N*N*sizeof(int);
hipMalloc(&d_a,size);
hipMalloc(&d_b,size);
hipMalloc(&d_c,size);
// Transfer to device
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);
// Call kernel with the blocks, grid and threads specified
dim3 blocksPerGrid(run[0],run[1],run[2]);
dim3 threadsPerBlock(run[3],run[4],run[5]);
hipLaunchKernelGGL(( multiplyCell), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, N,d_a,d_b,d_c);
// Copy result back from gpu
hipMemcpy(c,d_c,size,hipMemcpyDeviceToHost);
// Free variables
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| b4350d5a618dfced37ac197d3aa43fdc36a0554d.cu | #include <stdio.h>
#include <chrono>
__global__
void multiplyCell(int N, int * a, int * b, int * c){
// We get the index of the current data
unsigned int threadx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int thready = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int threadxy = thready * N + threadx;
// Then we get the col and row
int row = threadxy / N;
int col = threadxy % N;
if(row < N && col < N){
// Then we multiply and add each one of them
int result = 0;
for(int i=0;i<N;i++){
result +=a[row*N+i]*b[i*N+col];
}
c[threadxy]=result;
}
}
void GPUTimedMatrixMultiplication(int N,int * a,int * b, int * c,
int ** runs, int runsLength){
// Allocate in GPU
int *d_a,*d_b,*d_c;
int size = N*N*sizeof(int);
cudaMalloc(&d_a,size);
cudaMalloc(&d_b,size);
cudaMalloc(&d_c,size);
// Transfer to device
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
// Call kernel with the blocks, grid and threads specified
for(int i=0;i<runsLength;i++){
int * run = runs[i];
dim3 blocksPerGrid(run[0],run[1],run[2]);
dim3 threadsPerBlock(run[3],run[4],run[5]);
//initialize timer
auto start = std::chrono::high_resolution_clock::now();
multiplyCell<<<blocksPerGrid,threadsPerBlock>>>(N,d_a,d_b,d_c);
//finish timer
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<float, std::milli> duration_ms = end - start;
//print result
printf("GPU test dimensions threads %d %d blocks %d %d N: %d duration: %f\n ms\n",
run[0],run[1],run[3],run[4],N,duration_ms.count());
}
// Copy result back from gpu
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
// Free variables
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
void GPUMatrixMultiplication(int N,int * a,int * b, int * c,
int * run){
// Allocate in GPU
int *d_a,*d_b,*d_c;
int size = N*N*sizeof(int);
cudaMalloc(&d_a,size);
cudaMalloc(&d_b,size);
cudaMalloc(&d_c,size);
// Transfer to device
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
// Call kernel with the blocks, grid and threads specified
dim3 blocksPerGrid(run[0],run[1],run[2]);
dim3 threadsPerBlock(run[3],run[4],run[5]);
multiplyCell<<<blocksPerGrid,threadsPerBlock>>>(N,d_a,d_b,d_c);
// Copy result back from gpu
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
// Free variables
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
f9a52af573090a7c9e511cefd49bc203be11af7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
__global__ void propagateCarries(int* d_matrix, int numCols) {
int idx = blockDim.x * blockIdx.x + threadIdx.x * numCols;
int carry = 0;
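// Walk the row from the least-significant (rightmost) column leftward, keeping one digit per cell and carrying the overflow.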
for (int i = numCols - 1; i >= 0; i--) {
int rowVal = (d_matrix[idx + i] + carry) % 10;
carry = (d_matrix[idx + i] + carry) / 10;
d_matrix[idx + i] = rowVal;
}
} | f9a52af573090a7c9e511cefd49bc203be11af7f.cu | #include "includes.h"
using namespace std;
__global__ void propagateCarries(int* d_matrix, int numCols) {
int idx = blockDim.x * blockIdx.x + threadIdx.x * numCols;
int carry = 0;
for (int i = numCols - 1; i >= 0; i--) {
int rowVal = (d_matrix[idx + i] + carry) % 10;
carry = (d_matrix[idx + i] + carry) / 10;
d_matrix[idx + i] = rowVal;
}
} |
a6f2aee3085b4b03441c1c965ac8c2c2fdbb3668.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mm(float *dA, float *dB, float *dC, int DIM, int N, int GPUN) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id <= GPUN) {
int i = id / DIM;
int j = id % DIM;
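// Each thread computes one element C[i][j] as the dot product of row i of A and column j of B.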
float sum = 0.0f;
for (int k = 0; k < DIM; k++) {
sum += dA[i*DIM+k] * dB[k*DIM+j];
}
dC[id] = sum;
}
} | a6f2aee3085b4b03441c1c965ac8c2c2fdbb3668.cu | #include "includes.h"
__global__ void mm(float *dA, float *dB, float *dC, int DIM, int N, int GPUN) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id <= GPUN) {
int i = id / DIM;
int j = id % DIM;
float sum = 0.0f;
for (int k = 0; k < DIM; k++) {
sum += dA[i*DIM+k] * dB[k*DIM+j];
}
dC[id] = sum;
}
} |
aac4a108023848d510b3827d6db147c6a91ba787.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*%****************************************************************************80
% Code:
% ncclSendRecv.cu
%
% Purpose:
% Implements sample send/recv code using the package NCCL (p2p).
%
% Modified:
% Aug 18 2020 10:57
%
% Author:
% Murilo Boratto <murilo.boratto 'at' fieb.org.br>
%
% How to Compile:
% nvcc ncclSendRecv.cu -o object -lnccl
%
% HowtoExecute:
% ./object
%
%****************************************************************************80*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rccl.h>
__global__ void kernel(int *a, int rank) {
if(rank == 0)
printf("%d\t", a[threadIdx.x]);
else
printf("%d\t", a[threadIdx.x]*10);
}
void show_all(int *in, int n){
printf("\n");
for(int i=0; i < n; i++)
printf("%d\t", in[i]);
printf("\n");
}/*show_all*/
int main(int argc, char* argv[]) {
int size = 8;
/*Get current amounts number of GPU*/
int nGPUs = 0;
hipGetDeviceCount(&nGPUs);
printf("nGPUs = %d\n",nGPUs);
/*List GPU Device*/
int *DeviceList = (int *) malloc ( nGPUs * sizeof(int));
for(int i = 0; i < nGPUs; ++i)
DeviceList[i] = i;
/*NCCL Init*/
ncclComm_t* comms = (ncclComm_t*) malloc(sizeof(ncclComm_t) * nGPUs);
hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)* nGPUs);
ncclCommInitAll(comms, nGPUs, DeviceList);
/*General variables*/
int *host = (int*) malloc(size * sizeof(int));
int **sendbuff = (int**)malloc(nGPUs * sizeof(int*));
int **recvbuff = (int**)malloc(nGPUs * sizeof(int*));
/*Population of vector*/
for(int i = 0; i < size; i++)
host[i] = i + 1;
show_all(host, size);
for(int g = 0; g < nGPUs; g++) {
hipSetDevice(DeviceList[g]);
hipStreamCreate(&s[g]);
hipMalloc(&sendbuff[g], size * sizeof(int));
hipMalloc(&recvbuff[g], size * sizeof(int));
if(g == 0)
hipMemcpy(sendbuff[g], host, size * sizeof(int),hipMemcpyHostToDevice);
}/*for*/
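/* Post every send/recv inside one NCCL group so a single thread can drive all ranks without deadlock */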
ncclGroupStart();
for(int g = 0; g < nGPUs; g++) {
ncclSend(sendbuff[0], size, ncclInt, g, comms[g], s[g]);
ncclRecv(recvbuff[g], size, ncclInt, g, comms[g], s[g]);
}
ncclGroupEnd();
for(int g = 0; g < nGPUs; g++) {
hipSetDevice(DeviceList[g]);
printf("\nThis is device %d\n", g);
if(g==0)
hipLaunchKernelGGL(( kernel) , dim3(1) , dim3(size) , 0, 0, sendbuff[g], g);
else
hipLaunchKernelGGL(( kernel) , dim3(1) , dim3(size) , 0, 0, recvbuff[g], g);
hipDeviceSynchronize();
}
printf("\n");
for (int g = 0; g < nGPUs; g++) {
hipSetDevice(DeviceList[g]);
hipStreamSynchronize(s[g]);
}
for(int g = 0; g < nGPUs; g++) {
hipSetDevice(DeviceList[g]);
hipStreamDestroy(s[g]);
}
for(int g = 0; g < nGPUs; g++) {
ncclCommDestroy(comms[g]);
}
 for(int g = 0; g < nGPUs; g++) {
       hipSetDevice(DeviceList[g]);
       hipFree(sendbuff[g]); hipFree(recvbuff[g]); /* per-device buffers */
 }
 free(s);
 free(host);
 free(sendbuff); free(recvbuff); /* the host arrays only held device pointers */
return 0;
}/*main*/
| aac4a108023848d510b3827d6db147c6a91ba787.cu | /*%****************************************************************************80
% Code:
% ncclSendRecv.cu
%
% Purpose:
% Implements sample send/recv code using the package NCCL (p2p).
%
% Modified:
% Aug 18 2020 10:57
%
% Author:
% Murilo Boratto <murilo.boratto 'at' fieb.org.br>
%
% How to Compile:
% nvcc ncclSendRecv.cu -o object -lnccl
%
% HowtoExecute:
% ./object
%
%****************************************************************************80*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <nccl.h>
__global__ void kernel(int *a, int rank) {
if(rank == 0)
printf("%d\t", a[threadIdx.x]);
else
printf("%d\t", a[threadIdx.x]*10);
}
void show_all(int *in, int n){
printf("\n");
for(int i=0; i < n; i++)
printf("%d\t", in[i]);
printf("\n");
}/*show_all*/
int main(int argc, char* argv[]) {
int size = 8;
/*Get current amounts number of GPU*/
int nGPUs = 0;
cudaGetDeviceCount(&nGPUs);
printf("nGPUs = %d\n",nGPUs);
/*List GPU Device*/
int *DeviceList = (int *) malloc ( nGPUs * sizeof(int));
for(int i = 0; i < nGPUs; ++i)
DeviceList[i] = i;
/*NCCL Init*/
ncclComm_t* comms = (ncclComm_t*) malloc(sizeof(ncclComm_t) * nGPUs);
cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)* nGPUs);
ncclCommInitAll(comms, nGPUs, DeviceList);
/*General variables*/
int *host = (int*) malloc(size * sizeof(int));
int **sendbuff = (int**)malloc(nGPUs * sizeof(int*));
int **recvbuff = (int**)malloc(nGPUs * sizeof(int*));
/*Population of vector*/
for(int i = 0; i < size; i++)
host[i] = i + 1;
show_all(host, size);
for(int g = 0; g < nGPUs; g++) {
cudaSetDevice(DeviceList[g]);
cudaStreamCreate(&s[g]);
cudaMalloc(&sendbuff[g], size * sizeof(int));
cudaMalloc(&recvbuff[g], size * sizeof(int));
if(g == 0)
cudaMemcpy(sendbuff[g], host, size * sizeof(int),cudaMemcpyHostToDevice);
}/*for*/
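   /* Grouped point-to-point exchange: inside ncclGroupStart/ncclGroupEnd each
      communicator g posts a send of device 0's buffer (sendbuff[0]) to rank g
      together with the matching receive into its own recvbuff[g], so the data
      initialised on GPU 0 reaches every GPU without the blocking send/recv
      pairs deadlocking. */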
ncclGroupStart();
for(int g = 0; g < nGPUs; g++) {
ncclSend(sendbuff[0], size, ncclInt, g, comms[g], s[g]);
ncclRecv(recvbuff[g], size, ncclInt, g, comms[g], s[g]);
}
ncclGroupEnd();
for(int g = 0; g < nGPUs; g++) {
cudaSetDevice(DeviceList[g]);
printf("\nThis is device %d\n", g);
if(g==0)
kernel <<< 1 , size >>> (sendbuff[g], g);
else
kernel <<< 1 , size >>> (recvbuff[g], g);
cudaDeviceSynchronize();
}
printf("\n");
for (int g = 0; g < nGPUs; g++) {
cudaSetDevice(DeviceList[g]);
cudaStreamSynchronize(s[g]);
}
for(int g = 0; g < nGPUs; g++) {
cudaSetDevice(DeviceList[g]);
cudaStreamDestroy(s[g]);
}
for(int g = 0; g < nGPUs; g++) {
ncclCommDestroy(comms[g]);
}
 for(int g = 0; g < nGPUs; g++) {
       cudaSetDevice(DeviceList[g]);
       cudaFree(sendbuff[g]); cudaFree(recvbuff[g]); /* per-device buffers */
 }
 free(s);
 free(host);
 free(sendbuff); free(recvbuff); /* the host arrays only held device pointers */
return 0;
}/*main*/
|
4dcd13726ae9790a0fb6fd6e6b169753d3cc5e82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){
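  /* 27-point 3D stencil with two chained stencil passes, streamed along the Z
     axis: __tilevar_0__ caches the current input X-Y plane and __tilevar_1__
     the first-pass partial results, while t2/b2 and t3/b3 carry the top/bottom
     plane contributions in registers and are rotated at the end of each z
     iteration. Blocks overlap by 4 points in X and Y (see the grid setup in
     the host code) so each block recomputes its own halo. */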
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double* __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
double* __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4);
double t2=0.0f, t3=0.0f, out = 0.0f;
double b2=0.0f, b3=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom
double __temp_3__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_0__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t2 += __temp_43__;
//Mid
double __temp_47__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b2 += __temp_88__;
// Top
double __temp_92__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
double __temp_3__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_1__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t3 += __temp_43__;
//Mid
double __temp_47__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b3 += __temp_88__;
// Top
double __temp_92__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = out;
}
__syncthreads ();
// Now rotate
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b2;
b2 = t2;
t2 = 0.0;
out= b3;
b3 = t3;
t3 = 0.0;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d27pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
unsigned int power1, power2;
rsmi_status_t result;
uint32_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(RSMI_STATUS_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<500; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
}
/*Host Free End*/
| 4dcd13726ae9790a0fb6fd6e6b169753d3cc5e82.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, double * __restrict__ __var_1__){
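  /* 27-point 3D stencil with two chained stencil passes, streamed along the Z
     axis: __tilevar_0__ caches the current input X-Y plane and __tilevar_1__
     the first-pass partial results, while t2/b2 and t3/b3 carry the top/bottom
     plane contributions in registers and are rotated at the end of each z
     iteration. Blocks overlap by 4 points in X and Y (see the grid setup in
     the host code) so each block recomputes its own halo. */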
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double* __tilevar_0__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
double* __tilevar_1__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)-4);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-4);
double t2=0.0f, t3=0.0f, out = 0.0f;
double b2=0.0f, b3=0.0f;
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom
double __temp_3__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_0__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t2 += __temp_43__;
//Mid
double __temp_47__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b2 += __temp_88__;
// Top
double __temp_92__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
double __temp_3__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_7__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_8__ = (0.5f * __temp_3__ + 0.7f *__temp_7__);
double __temp_12__ = (__tilevar_1__[ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_13__ = (__temp_8__ + 0.9f * __temp_12__);
double __temp_17__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_18__ = (__temp_13__ + 1.2f * __temp_17__);
double __temp_22__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_23__ = (__temp_18__ + 1.5f * __temp_22__);
double __temp_27__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_28__ = (__temp_23__ + 1.2f * __temp_27__);
double __temp_32__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_33__ = (__temp_28__ + 0.9f * __temp_32__);
double __temp_37__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_38__ = (__temp_33__ + 0.7f * __temp_37__);
double __temp_42__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_43__ = (__temp_38__ + 0.5f * __temp_42__) / 159;
t3 += __temp_43__;
//Mid
double __temp_47__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_52__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
double __temp_57__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
double __temp_62__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
double __temp_67__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
double __temp_72__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
double __temp_77__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
double __temp_82__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
double __temp_87__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b3 += __temp_88__;
// Top
double __temp_92__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_97__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_98__ = (0.52f * __temp_92__ + 0.72f * __temp_97__);
double __temp_102__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
double __temp_103__ = (__temp_98__ + 0.92f * __temp_102__);
double __temp_107__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_108__ = (__temp_103__ + 1.22f * __temp_107__);
double __temp_112__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_113__ = (__temp_108__ + 1.52f * __temp_112__);
double __temp_117__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
double __temp_118__ = (__temp_113__ + 1.22f * __temp_117__);
double __temp_122__ = (__tilevar_1__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_123__ = (__temp_118__ + 0.92f * __temp_122__);
double __temp_127__ = (__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_128__ = (__temp_123__ + 0.72f * __temp_127__);
double __temp_132__ = (__tilevar_1__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
double __temp_133__ = (__temp_128__ + 0.52f * __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-2,0))] = out;
}
__syncthreads ();
// Now rotate
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b2;
b2 = t2;
t2 = 0.0;
out= b3;
b3 = t3;
t3 = 0.0;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(2*FORMA_BLOCKDIM_Y*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d27pt(double * h_input, int L, int M, int N, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*(L*M*N), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-4);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-4);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<500; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
}
/*Host Free End*/
|
17161d6023d13e21fce7dc64209df71bb9cb5c94.hip | // !!! This is a file automatically generated by hipify!!!
//====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "define.c"
#include "extract_kernel.hip"
#include "prepare_kernel.cu"
#include "reduce_kernel.hip"
#include "srad_kernel.hip"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "omp.h"
#include "device.c" // (in library path specified to compiler) needed by for device functions
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int main(int argc, char *argv []){
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
long long time_start;
time0 = get_time();
	// inputs image, input parameters
	fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
	// inputs image, input parameters
fp* image; // input image
int Nr,Nc; // IMAGE nbr of rows/cols/elements
long Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1,r2,c1,c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indicies
int *iN,*iS,*jE,*jW;
// counters
int iter; // primary loop
long i,j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
dim3 blocks3;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if(argc != 5){
printf("ERROR: wrong number of arguments\n");
return 0;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}
time2 = get_time();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
read_graphics( "../../../data/srad/image.pgm",
image_ori,
image_ori_rows,
image_ori_cols,
1);
time3 = get_time();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr*Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize( image_ori,
image_ori_rows,
image_ori_cols,
image,
Nr,
Nc,
1);
time4 = get_time();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int *)malloc(mem_size_i) ; // north surrounding element
iS = (int *)malloc(mem_size_i) ; // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int *)malloc(mem_size_j) ; // west surrounding element
jE = (int *)malloc(mem_size_j) ; // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i=0; i<Nr; i++) {
iN[i] = i-1; // holds index of IMAGE row above
iS[i] = i+1; // holds index of IMAGE row below
}
for (j=0; j<Nc; j++) {
jW[j] = j-1; // holds index of IMAGE column on the left
jE[j] = j+1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1
//================================================================================80
// GPU SETUP
//================================================================================80
//double start_timer = omp_get_wtime();
// allocate memory for entire IMAGE on DEVICE
//stage1_start
time_start = get_time();
mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE
hipMalloc((void **)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
hipMalloc((void **)&d_iN, mem_size_i); //
hipMemcpy(d_iN, iN, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_iS, mem_size_i); //
hipMemcpy(d_iS, iS, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jE, mem_size_j); //
hipMemcpy(d_jE, jE, mem_size_j, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jW, mem_size_j); //
hipMemcpy(d_jW, jW, mem_size_j, hipMemcpyHostToDevice); //
// allocate memory for partial sums on DEVICE
hipMalloc((void **)&d_sums, mem_size); //
hipMalloc((void **)&d_sums2, mem_size); //
// allocate memory for derivatives
hipMalloc((void **)&d_dN, mem_size); //
hipMalloc((void **)&d_dS, mem_size); //
hipMalloc((void **)&d_dW, mem_size); //
hipMalloc((void **)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
hipMalloc((void **)&d_c, mem_size); //
checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne/threads.x;
if (Ne % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
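	// Worked example (assuming NUMBER_THREADS is 512, as in the stock define.c):
	// for the original 502x458 image Ne = 229916, so blocks_x = 229916/512 = 449
	// with a remainder of 28, giving a 450x1 grid of 512x1 blocks.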
time5 = get_time();
//================================================================================80
	// COPY INPUT TO GPU
//================================================================================80
hipMemcpy(d_I, image, mem_size, hipMemcpyHostToDevice);
//stage1_end
//stage2_start
time6 = get_time();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
hipLaunchKernelGGL(( extract), dim3(blocks), dim3(threads), 0, 0, Ne,
d_I);
checkCUDAError("extract");
//
time7 = get_time();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// execute main loop
for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
hipLaunchKernelGGL(( prepare), dim3(blocks), dim3(threads), 0, 0, Ne,
d_I,
d_sums,
d_sums2);
checkCUDAError("prepare");
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
while(blocks2.x != 0){
checkCUDAError("before reduce");
// run kernel
hipLaunchKernelGGL(( reduce), dim3(blocks2), dim3(threads), 0, 0, Ne,
no,
mul,
d_sums,
d_sums2);
checkCUDAError("reduce");
// update execution parameters
no = blocks2.x; // get current number of elements
if(blocks2.x == 1){
blocks2.x = 0;
}
else{
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x/threads.x; // number of blocks
if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
checkCUDAError("after reduce");
}
checkCUDAError("before copy sum");
		// copy total sums back to the host
mem_size_single = sizeof(fp) * 1;
hipMemcpy(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost);
hipMemcpy(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost);
checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI
meanROI2 = meanROI * meanROI; //
varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI
		q0sqr = varROI / meanROI2; // normalized variance of ROI (the speckle scale q0^2)
// execute srad kernel
hipLaunchKernelGGL(( srad), dim3(blocks), dim3(threads), 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
q0sqr, // standard deviation of ROI
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad");
// execute srad2 kernel
hipLaunchKernelGGL(( srad2), dim3(blocks), dim3(threads), 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
hipLaunchKernelGGL(( compress), dim3(blocks), dim3(threads), 0, 0, Ne,
d_I);
checkCUDAError("compress");
//stage2_end
//stage3_start
time9 = get_time();
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
hipMemcpy(image, d_I, mem_size, hipMemcpyDeviceToHost);
checkCUDAError("copy back");
//double end_timer = omp_get_wtime();
//printf("Time10 - GPU_Setup: %.8f\n",(end_timer-start_timer));
time10 = get_time();
//stage_minus_start
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics( "image_out.pgm",
image,
Nr,
Nc,
1,
255);
time11 = get_time();
//stage_minus_end
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
hipFree(d_I);
hipFree(d_c);
hipFree(d_iN);
hipFree(d_iS);
hipFree(d_jE);
hipFree(d_jW);
hipFree(d_dN);
hipFree(d_dS);
hipFree(d_dE);
hipFree(d_dW);
hipFree(d_sums);
hipFree(d_sums2);
//stage3_end
time12 = get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ COMMAND LINE PARAMETERS\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ IMAGE FROM FILE\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : RESIZE IMAGE\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO CPU->GPU\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : EXTRACT IMAGE\n", (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPUTE\n", (float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPRESS IMAGE\n", (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO GPU->CPU\n", (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : SAVE IMAGE INTO FILE\n", (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : FREE MEMORY\n", (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time12-time0) / 1000000);
printf("stage1 :%.12f s\n", (float) (time6-time_start) / 1000000);
printf("stage2 :%.12f s\n", (float) (time9-time6) / 1000000);
printf("stage3 :%.12f s\n", (float) (time12-time9-(time11-time10)) / 1000000);
printf("total :%.12f s\n", (float) (time12-time_start-(time11-time10)) / 1000000);
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
| 17161d6023d13e21fce7dc64209df71bb9cb5c94.cu | //====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>
#include "define.c"
#include "extract_kernel.cu"
#include "prepare_kernel.cu"
#include "reduce_kernel.cu"
#include "srad_kernel.cu"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "omp.h"
#include "device.c" // (in library path specified to compiler) needed by for device functions
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int main(int argc, char *argv []){
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
long long time_start;
time0 = get_time();
	// inputs image, input parameters
	fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
	// inputs image, input parameters
fp* image; // input image
int Nr,Nc; // IMAGE nbr of rows/cols/elements
long Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1,r2,c1,c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indicies
int *iN,*iS,*jE,*jW;
// counters
int iter; // primary loop
long i,j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
dim3 blocks3;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time();
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if(argc != 5){
printf("ERROR: wrong number of arguments\n");
return 0;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}
time2 = get_time();
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
read_graphics( "../../../data/srad/image.pgm",
image_ori,
image_ori_rows,
image_ori_cols,
1);
time3 = get_time();
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr*Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize( image_ori,
image_ori_rows,
image_ori_cols,
image,
Nr,
Nc,
1);
time4 = get_time();
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int *)malloc(mem_size_i) ; // north surrounding element
iS = (int *)malloc(mem_size_i) ; // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int *)malloc(mem_size_j) ; // west surrounding element
jE = (int *)malloc(mem_size_j) ; // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i=0; i<Nr; i++) {
iN[i] = i-1; // holds index of IMAGE row above
iS[i] = i+1; // holds index of IMAGE row below
}
for (j=0; j<Nc; j++) {
jW[j] = j-1; // holds index of IMAGE column on the left
jE[j] = j+1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1
//================================================================================80
// GPU SETUP
//================================================================================80
//double start_timer = omp_get_wtime();
// allocate memory for entire IMAGE on DEVICE
//stage1_start
time_start = get_time();
mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE
cudaMalloc((void **)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
cudaMalloc((void **)&d_iN, mem_size_i); //
cudaMemcpy(d_iN, iN, mem_size_i, cudaMemcpyHostToDevice); //
cudaMalloc((void **)&d_iS, mem_size_i); //
cudaMemcpy(d_iS, iS, mem_size_i, cudaMemcpyHostToDevice); //
cudaMalloc((void **)&d_jE, mem_size_j); //
cudaMemcpy(d_jE, jE, mem_size_j, cudaMemcpyHostToDevice); //
cudaMalloc((void **)&d_jW, mem_size_j); //
cudaMemcpy(d_jW, jW, mem_size_j, cudaMemcpyHostToDevice); //
// allocate memory for partial sums on DEVICE
cudaMalloc((void **)&d_sums, mem_size); //
cudaMalloc((void **)&d_sums2, mem_size); //
// allocate memory for derivatives
cudaMalloc((void **)&d_dN, mem_size); //
cudaMalloc((void **)&d_dS, mem_size); //
cudaMalloc((void **)&d_dW, mem_size); //
cudaMalloc((void **)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
cudaMalloc((void **)&d_c, mem_size); //
checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne/threads.x;
if (Ne % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
time5 = get_time();
//================================================================================80
	// COPY INPUT TO GPU
//================================================================================80
cudaMemcpy(d_I, image, mem_size, cudaMemcpyHostToDevice);
//stage1_end
//stage2_start
time6 = get_time();
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
extract<<<blocks, threads>>>( Ne,
d_I);
checkCUDAError("extract");
//
time7 = get_time();
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// execute main loop
for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
prepare<<<blocks, threads>>>( Ne,
d_I,
d_sums,
d_sums2);
checkCUDAError("prepare");
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
while(blocks2.x != 0){
checkCUDAError("before reduce");
// run kernel
reduce<<<blocks2, threads>>>( Ne,
no,
mul,
d_sums,
d_sums2);
checkCUDAError("reduce");
// update execution parameters
no = blocks2.x; // get current number of elements
if(blocks2.x == 1){
blocks2.x = 0;
}
else{
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x/threads.x; // number of blocks
if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
checkCUDAError("after reduce");
}
checkCUDAError("before copy sum");
		// copy total sums back to host
mem_size_single = sizeof(fp) * 1;
cudaMemcpy(&total, d_sums, mem_size_single, cudaMemcpyDeviceToHost);
cudaMemcpy(&total2, d_sums2, mem_size_single, cudaMemcpyDeviceToHost);
checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI
meanROI2 = meanROI * meanROI; //
varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI
		q0sqr = varROI / meanROI2; // gets normalized variance (squared coefficient of variation) of ROI
// execute srad kernel
srad<<<blocks, threads>>>( lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
q0sqr, // standard deviation of ROI
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad");
// execute srad2 kernel
srad2<<<blocks, threads>>>( lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time();
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
compress<<<blocks, threads>>>( Ne,
d_I);
checkCUDAError("compress");
//stage2_end
//stage3_start
time9 = get_time();
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
cudaMemcpy(image, d_I, mem_size, cudaMemcpyDeviceToHost);
checkCUDAError("copy back");
//double end_timer = omp_get_wtime();
//printf("Time10 - GPU_Setup: %.8f\n",(end_timer-start_timer));
time10 = get_time();
//stage_minus_start
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics( "image_out.pgm",
image,
Nr,
Nc,
1,
255);
time11 = get_time();
//stage_minus_end
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
cudaFree(d_I);
cudaFree(d_c);
cudaFree(d_iN);
cudaFree(d_iS);
cudaFree(d_jE);
cudaFree(d_jW);
cudaFree(d_dN);
cudaFree(d_dS);
cudaFree(d_dE);
cudaFree(d_dW);
cudaFree(d_sums);
cudaFree(d_sums2);
//stage3_end
time12 = get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ COMMAND LINE PARAMETERS\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ IMAGE FROM FILE\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : RESIZE IMAGE\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO CPU->GPU\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : EXTRACT IMAGE\n", (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPUTE\n", (float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPRESS IMAGE\n", (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO GPU->CPU\n", (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : SAVE IMAGE INTO FILE\n", (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : FREE MEMORY\n", (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time12-time0) / 1000000);
printf("stage1 :%.12f s\n", (float) (time6-time_start) / 1000000);
printf("stage2 :%.12f s\n", (float) (time9-time6) / 1000000);
printf("stage3 :%.12f s\n", (float) (time12-time9-(time11-time10)) / 1000000);
printf("total :%.12f s\n", (float) (time12-time_start-(time11-time10)) / 1000000);
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
|
35217728b5026f089bc8bd7d0a59f7da4a155726.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2021 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "predict_fn.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx,
common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
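// Walk a single tree from the root down to a leaf for one row, fetching feature
// values through the Loader (shared memory when available, global memory otherwise).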
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
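// Accumulate leaf values over the given tree range for each row. With a single
// output group every tree adds to the row's slot; with multiple groups each tree
// adds to the slot of its output group.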
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(hipMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), hipMemcpyDefault));
dh::safe_cuda(hipMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), hipMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
  /*! Feature values (categories) whose bit is set flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
  // The &= op in bitfield is per CUDA thread; this one loops over the entire
// bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
struct PathInfo {
int64_t leaf_position; // -1 not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(hipSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::hip::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::hip::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(hipMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), hipMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes should never be 0 here; the check below enforces it.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
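  // Returning zero tells the caller to fall back to reading features from global
  // memory when one row per thread does not fit into the available shared memory.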
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(ctx_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>(BatchParam{})) {
dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = ctx_->gpu_id;
CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
<< "Number of columns in data must equal to trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(ctx_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(BatchParam{})) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
| 35217728b5026f089bc8bd7d0a59f7da4a155726.cu | /*!
* Copyright 2017-2021 by Contributors
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <GPUTreeShap/gpu_treeshap.h>
#include <memory>
#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"
#include "predict_fn.h"
#include "../gbm/gbtree_model.h"
#include "../data/ellpack_page.cuh"
#include "../data/device_adapter.cuh"
#include "../common/common.h"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace predictor {
DMLC_REGISTRY_FILE_TAG(gpu_predictor);
struct TreeView {
RegTree::CategoricalSplitMatrix cats;
common::Span<RegTree::Node const> d_tree;
XGBOOST_DEVICE
TreeView(size_t tree_begin, size_t tree_idx,
common::Span<const RegTree::Node> d_nodes,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories) {
auto begin = d_tree_segments[tree_idx - tree_begin];
auto n_nodes = d_tree_segments[tree_idx - tree_begin + 1] -
d_tree_segments[tree_idx - tree_begin];
d_tree = d_nodes.subspan(begin, n_nodes);
auto tree_cat_ptrs = d_cat_node_segments.subspan(begin, n_nodes);
auto tree_split_types = d_tree_split_types.subspan(begin, n_nodes);
auto tree_categories =
d_categories.subspan(d_cat_tree_segments[tree_idx - tree_begin],
d_cat_tree_segments[tree_idx - tree_begin + 1] -
d_cat_tree_segments[tree_idx - tree_begin]);
cats.split_type = tree_split_types;
cats.categories = tree_categories;
cats.node_ptr = tree_cat_ptrs;
}
__device__ bool HasCategoricalSplit() const {
return !cats.categories.empty();
}
};
struct SparsePageView {
common::Span<const Entry> d_data;
common::Span<const bst_row_t> d_row_ptr;
bst_feature_t num_features;
SparsePageView() = default;
XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data,
common::Span<const bst_row_t> row_ptr,
bst_feature_t num_features)
: d_data{data}, d_row_ptr{row_ptr}, num_features(num_features) {}
__device__ float GetElement(size_t ridx, size_t fidx) const {
// Binary search
auto begin_ptr = d_data.begin() + d_row_ptr[ridx];
auto end_ptr = d_data.begin() + d_row_ptr[ridx + 1];
if (end_ptr - begin_ptr == this->NumCols()) {
// Bypass span check for dense data
return d_data.data()[d_row_ptr[ridx] + fidx].fvalue;
}
common::Span<const Entry>::iterator previous_middle;
while (end_ptr != begin_ptr) {
auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
if (middle == previous_middle) {
break;
} else {
previous_middle = middle;
}
if (middle->index == fidx) {
return middle->fvalue;
} else if (middle->index < fidx) {
begin_ptr = middle;
} else {
end_ptr = middle;
}
}
// Value is missing
return nanf("");
}
XGBOOST_DEVICE size_t NumRows() const { return d_row_ptr.size() - 1; }
XGBOOST_DEVICE size_t NumCols() const { return num_features; }
};
struct SparsePageLoader {
bool use_shared;
SparsePageView data;
float* smem;
size_t entry_start;
__device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features,
bst_row_t num_rows, size_t entry_start, float)
: use_shared(use_shared),
data(data),
entry_start(entry_start) {
extern __shared__ float _smem[];
smem = _smem;
// Copy instances
if (use_shared) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
int shared_elements = blockDim.x * data.num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
bst_uint elem_begin = data.d_row_ptr[global_idx];
bst_uint elem_end = data.d_row_ptr[global_idx + 1];
for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
Entry elem = data.d_data[elem_idx - entry_start];
smem[threadIdx.x * data.num_features + elem.index] = elem.fvalue;
}
}
__syncthreads();
}
}
__device__ float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * data.num_features + fidx];
} else {
return data.GetElement(ridx, fidx);
}
}
};
struct EllpackLoader {
EllpackDeviceAccessor const& matrix;
XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool,
bst_feature_t, bst_row_t, size_t, float)
: matrix{m} {}
__device__ __forceinline__ float GetElement(size_t ridx, size_t fidx) const {
auto gidx = matrix.GetBinIndex(ridx, fidx);
if (gidx == -1) {
return nan("");
}
if (common::IsCat(matrix.feature_types, fidx)) {
return matrix.gidx_fvalue_map[gidx];
}
// The gradient index needs to be shifted by one as min values are not included in the
// cuts.
if (gidx == matrix.feature_segments[fidx]) {
return matrix.min_fvalue[fidx];
}
return matrix.gidx_fvalue_map[gidx - 1];
}
};
template <typename Batch>
struct DeviceAdapterLoader {
Batch batch;
bst_feature_t columns;
float* smem;
bool use_shared;
data::IsValidFunctor is_valid;
using BatchT = Batch;
XGBOOST_DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared,
bst_feature_t num_features, bst_row_t num_rows,
size_t entry_start, float missing) :
batch{batch},
columns{num_features},
use_shared{use_shared},
is_valid{missing} {
extern __shared__ float _smem[];
smem = _smem;
if (use_shared) {
uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t shared_elements = blockDim.x * num_features;
dh::BlockFill(smem, shared_elements, nanf(""));
__syncthreads();
if (global_idx < num_rows) {
auto beg = global_idx * columns;
auto end = (global_idx + 1) * columns;
for (size_t i = beg; i < end; ++i) {
auto value = batch.GetElement(i).value;
if (is_valid(value)) {
smem[threadIdx.x * num_features + (i - beg)] = value;
}
}
}
}
__syncthreads();
}
XGBOOST_DEV_INLINE float GetElement(size_t ridx, size_t fidx) const {
if (use_shared) {
return smem[threadIdx.x * columns + fidx];
}
auto value = batch.GetElement(ridx * columns + fidx).value;
if (is_valid(value)) {
return value;
} else {
return nan("");
}
}
};
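// Walk a single tree from the root down to a leaf for one row, fetching feature
// values through the Loader (shared memory when available, global memory otherwise).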
template <bool has_missing, bool has_categorical, typename Loader>
__device__ bst_node_t GetLeafIndex(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = 0;
RegTree::Node n = tree.d_tree[nidx];
while (!n.IsLeaf()) {
float fvalue = loader->GetElement(ridx, n.SplitIndex());
bool is_missing = common::CheckNAN(fvalue);
nidx = GetNextNode<has_missing, has_categorical>(n, nidx, fvalue,
is_missing, tree.cats);
n = tree.d_tree[nidx];
}
return nidx;
}
template <bool has_missing, typename Loader>
__device__ float GetLeafWeight(bst_row_t ridx, TreeView const &tree,
Loader *loader) {
bst_node_t nidx = -1;
if (tree.HasCategoricalSplit()) {
nidx = GetLeafIndex<has_missing, true>(ridx, tree, loader);
} else {
nidx = GetLeafIndex<has_missing, false>(ridx, tree, loader);
}
return tree.d_tree[nidx].LeafValue();
}
template <typename Loader, typename Data>
__global__ void
PredictLeafKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories,
size_t tree_begin, size_t tree_end, size_t num_features,
size_t num_rows, size_t entry_start, bool use_shared,
float missing) {
bst_row_t ridx = blockDim.x * blockIdx.x + threadIdx.x;
if (ridx >= num_rows) {
return;
}
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
for (size_t tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_node_t leaf = -1;
if (d_tree.HasCategoricalSplit()) {
leaf = GetLeafIndex<true, true>(ridx, d_tree, &loader);
} else {
leaf = GetLeafIndex<true, false>(ridx, d_tree, &loader);
}
d_out_predictions[ridx * (tree_end - tree_begin) + tree_idx] = leaf;
}
}
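// Accumulate leaf values over the given tree range for each row. With a single
// output group every tree adds to the row's slot; with multiple groups each tree
// adds to the slot of its output group.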
template <typename Loader, typename Data, bool has_missing = true>
__global__ void
PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes,
common::Span<float> d_out_predictions,
common::Span<size_t const> d_tree_segments,
common::Span<int const> d_tree_group,
common::Span<FeatureType const> d_tree_split_types,
common::Span<uint32_t const> d_cat_tree_segments,
common::Span<RegTree::Segment const> d_cat_node_segments,
common::Span<uint32_t const> d_categories, size_t tree_begin,
size_t tree_end, size_t num_features, size_t num_rows,
size_t entry_start, bool use_shared, int num_group, float missing) {
bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
Loader loader(data, use_shared, num_features, num_rows, entry_start, missing);
if (global_idx >= num_rows) return;
if (num_group == 1) {
float sum = 0;
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
float leaf = GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
sum += leaf;
}
d_out_predictions[global_idx] += sum;
} else {
for (size_t tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
int tree_group = d_tree_group[tree_idx];
TreeView d_tree{
tree_begin, tree_idx, d_nodes,
d_tree_segments, d_tree_split_types, d_cat_tree_segments,
d_cat_node_segments, d_categories};
bst_uint out_prediction_idx = global_idx * num_group + tree_group;
d_out_predictions[out_prediction_idx] +=
GetLeafWeight<has_missing>(global_idx, d_tree, &loader);
}
}
}
class DeviceModel {
public:
// Need to lazily construct the vectors because GPU id is only known at runtime
HostDeviceVector<RTreeNodeStat> stats;
HostDeviceVector<size_t> tree_segments;
HostDeviceVector<RegTree::Node> nodes;
HostDeviceVector<int> tree_group;
HostDeviceVector<FeatureType> split_types;
// Pointer to each tree, segmenting the node array.
HostDeviceVector<uint32_t> categories_tree_segments;
// Pointer to each node, segmenting categories array.
HostDeviceVector<RegTree::Segment> categories_node_segments;
HostDeviceVector<uint32_t> categories;
size_t tree_beg_; // NOLINT
size_t tree_end_; // NOLINT
int num_group;
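  // Copy trees [tree_begin, tree_end) to the given device: nodes and stats are flattened
  // into single arrays indexed through tree_segments, and categorical-split metadata is
  // packed into the categories / categories_*_segments vectors.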
void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
CHECK_EQ(model.param.size_leaf_vector, 0);
// Copy decision trees to device
tree_segments = std::move(HostDeviceVector<size_t>({}, gpu_id));
auto& h_tree_segments = tree_segments.HostVector();
h_tree_segments.reserve((tree_end - tree_begin) + 1);
size_t sum = 0;
h_tree_segments.push_back(sum);
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
sum += model.trees.at(tree_idx)->GetNodes().size();
h_tree_segments.push_back(sum);
}
nodes = std::move(HostDeviceVector<RegTree::Node>(h_tree_segments.back(), RegTree::Node(),
gpu_id));
stats = std::move(HostDeviceVector<RTreeNodeStat>(h_tree_segments.back(),
RTreeNodeStat(), gpu_id));
auto d_nodes = nodes.DevicePointer();
auto d_stats = stats.DevicePointer();
for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
auto& src_stats = model.trees.at(tree_idx)->GetStats();
dh::safe_cuda(cudaMemcpyAsync(
d_nodes + h_tree_segments[tree_idx - tree_begin], src_nodes.data(),
sizeof(RegTree::Node) * src_nodes.size(), cudaMemcpyDefault));
dh::safe_cuda(cudaMemcpyAsync(
d_stats + h_tree_segments[tree_idx - tree_begin], src_stats.data(),
sizeof(RTreeNodeStat) * src_stats.size(), cudaMemcpyDefault));
}
tree_group = std::move(HostDeviceVector<int>(model.tree_info.size(), 0, gpu_id));
auto& h_tree_group = tree_group.HostVector();
std::memcpy(h_tree_group.data(), model.tree_info.data(), sizeof(int) * model.tree_info.size());
// Initialize categorical splits.
split_types.SetDevice(gpu_id);
std::vector<FeatureType>& h_split_types = split_types.HostVector();
h_split_types.resize(h_tree_segments.back());
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_st = model.trees.at(tree_idx)->GetSplitTypes();
std::copy(src_st.cbegin(), src_st.cend(),
h_split_types.begin() + h_tree_segments[tree_idx - tree_begin]);
}
categories = HostDeviceVector<uint32_t>({}, gpu_id);
categories_tree_segments = HostDeviceVector<uint32_t>(1, 0, gpu_id);
std::vector<uint32_t> &h_categories = categories.HostVector();
std::vector<uint32_t> &h_split_cat_segments = categories_tree_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const& src_cats = model.trees.at(tree_idx)->GetSplitCategories();
size_t orig_size = h_categories.size();
h_categories.resize(orig_size + src_cats.size());
std::copy(src_cats.cbegin(), src_cats.cend(),
h_categories.begin() + orig_size);
h_split_cat_segments.push_back(h_categories.size());
}
categories_node_segments =
HostDeviceVector<RegTree::Segment>(h_tree_segments.back(), {}, gpu_id);
std::vector<RegTree::Segment> &h_categories_node_segments =
categories_node_segments.HostVector();
for (auto tree_idx = tree_begin; tree_idx < tree_end; ++tree_idx) {
auto const &src_cats_ptr = model.trees.at(tree_idx)->GetSplitCategoriesPtr();
std::copy(src_cats_ptr.cbegin(), src_cats_ptr.cend(),
h_categories_node_segments.begin() +
h_tree_segments[tree_idx - tree_begin]);
}
this->tree_beg_ = tree_begin;
this->tree_end_ = tree_end;
this->num_group = model.learner_model_param->num_output_group;
}
};
struct ShapSplitCondition {
ShapSplitCondition() = default;
XGBOOST_DEVICE
ShapSplitCondition(float feature_lower_bound, float feature_upper_bound,
bool is_missing_branch, common::CatBitField cats)
: feature_lower_bound(feature_lower_bound),
feature_upper_bound(feature_upper_bound),
is_missing_branch(is_missing_branch), categories{std::move(cats)} {
assert(feature_lower_bound <= feature_upper_bound);
}
/*! Feature values >= lower and < upper flow down this path. */
float feature_lower_bound;
float feature_upper_bound;
  /*! Categorical feature values whose bit is set in this bitfield flow down this path. */
common::CatBitField categories;
/*! Do missing values flow down this path? */
bool is_missing_branch;
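  // Illustrative (hypothetical) example: with feature_lower_bound = -inf,
  // feature_upper_bound = 3.5f, is_missing_branch = true and no categories,
  // EvaluateSplit(2.0f) and EvaluateSplit(NAN) return true, while EvaluateSplit(4.0f)
  // returns false.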
// Does this instance flow down this path?
XGBOOST_DEVICE bool EvaluateSplit(float x) const {
// is nan
if (isnan(x)) {
return is_missing_branch;
}
if (categories.Size() != 0) {
auto cat = static_cast<uint32_t>(x);
return categories.Check(cat);
} else {
return x >= feature_lower_bound && x < feature_upper_bound;
}
}
  // The &= operator on CatBitField works per CUDA thread; this helper instead loops over
  // the entire bitfield.
XGBOOST_DEVICE static common::CatBitField Intersect(common::CatBitField l,
common::CatBitField r) {
if (l.Data() == r.Data()) {
return l;
}
if (l.Size() > r.Size()) {
thrust::swap(l, r);
}
for (size_t i = 0; i < r.Bits().size(); ++i) {
l.Bits()[i] &= r.Bits()[i];
}
return l;
}
// Combine two split conditions on the same feature
XGBOOST_DEVICE void Merge(ShapSplitCondition other) {
// Combine duplicate features
if (categories.Size() != 0 || other.categories.Size() != 0) {
categories = Intersect(categories, other.categories);
} else {
feature_lower_bound = max(feature_lower_bound, other.feature_lower_bound);
feature_upper_bound = min(feature_upper_bound, other.feature_upper_bound);
}
is_missing_branch = is_missing_branch && other.is_missing_branch;
}
};
struct PathInfo {
  int64_t leaf_position;  // -1 if the node is not a leaf
size_t length;
size_t tree_idx;
};
// Transform model into path element form for GPUTreeShap
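// Two passes: first, a transform + copy_if collects a PathInfo (leaf index, path length,
// tree index) for every leaf, and an exclusive scan over the lengths gives the output
// offset of each path; then one GPU thread per leaf walks from the leaf up to the root,
// emitting a PathElement per edge (zero_fraction = child cover / parent cover, plus the
// parent's split interval or category bitset) and a final root element with feature -1.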
void ExtractPaths(
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>> *paths,
DeviceModel *model, dh::device_vector<uint32_t> *path_categories,
int gpu_id) {
dh::safe_cuda(cudaSetDevice(gpu_id));
auto& device_model = *model;
dh::caching_device_vector<PathInfo> info(device_model.nodes.Size());
dh::XGBCachingDeviceAllocator<PathInfo> alloc;
auto d_nodes = device_model.nodes.ConstDeviceSpan();
auto d_tree_segments = device_model.tree_segments.ConstDeviceSpan();
auto nodes_transform = dh::MakeTransformIterator<PathInfo>(
thrust::make_counting_iterator(0ull), [=] __device__(size_t idx) {
auto n = d_nodes[idx];
if (!n.IsLeaf() || n.IsDeleted()) {
return PathInfo{-1, 0, 0};
}
size_t tree_idx =
dh::SegmentId(d_tree_segments.begin(), d_tree_segments.end(), idx);
size_t tree_offset = d_tree_segments[tree_idx];
size_t path_length = 1;
while (!n.IsRoot()) {
n = d_nodes[n.Parent() + tree_offset];
path_length++;
}
return PathInfo{int64_t(idx), path_length, tree_idx};
});
auto end = thrust::copy_if(
thrust::cuda::par(alloc), nodes_transform,
nodes_transform + d_nodes.size(), info.begin(),
[=] __device__(const PathInfo& e) { return e.leaf_position != -1; });
info.resize(end - info.begin());
auto length_iterator = dh::MakeTransformIterator<size_t>(
info.begin(),
[=] __device__(const PathInfo& info) { return info.length; });
dh::caching_device_vector<size_t> path_segments(info.size() + 1);
thrust::exclusive_scan(thrust::cuda::par(alloc), length_iterator,
length_iterator + info.size() + 1,
path_segments.begin());
paths->resize(path_segments.back());
auto d_paths = dh::ToSpan(*paths);
auto d_info = info.data().get();
auto d_stats = device_model.stats.ConstDeviceSpan();
auto d_tree_group = device_model.tree_group.ConstDeviceSpan();
auto d_path_segments = path_segments.data().get();
auto d_split_types = device_model.split_types.ConstDeviceSpan();
auto d_cat_segments = device_model.categories_tree_segments.ConstDeviceSpan();
auto d_cat_node_segments = device_model.categories_node_segments.ConstDeviceSpan();
size_t max_cat = 0;
if (thrust::any_of(dh::tbegin(d_split_types), dh::tend(d_split_types),
common::IsCatOp{})) {
dh::PinnedMemory pinned;
auto h_max_cat = pinned.GetSpan<RegTree::Segment>(1);
auto max_elem_it = dh::MakeTransformIterator<size_t>(
dh::tbegin(d_cat_node_segments),
[] __device__(RegTree::Segment seg) { return seg.size; });
size_t max_cat_it =
thrust::max_element(thrust::device, max_elem_it,
max_elem_it + d_cat_node_segments.size()) -
max_elem_it;
dh::safe_cuda(cudaMemcpy(h_max_cat.data(),
d_cat_node_segments.data() + max_cat_it,
h_max_cat.size_bytes(), cudaMemcpyDeviceToHost));
max_cat = h_max_cat[0].size;
CHECK_GE(max_cat, 1);
path_categories->resize(max_cat * paths->size());
}
auto d_model_categories = device_model.categories.DeviceSpan();
common::Span<uint32_t> d_path_categories = dh::ToSpan(*path_categories);
dh::LaunchN(info.size(), [=] __device__(size_t idx) {
auto path_info = d_info[idx];
size_t tree_offset = d_tree_segments[path_info.tree_idx];
TreeView tree{0, path_info.tree_idx, d_nodes,
d_tree_segments, d_split_types, d_cat_segments,
d_cat_node_segments, d_model_categories};
int group = d_tree_group[path_info.tree_idx];
size_t child_idx = path_info.leaf_position;
auto child = d_nodes[child_idx];
float v = child.LeafValue();
const float inf = std::numeric_limits<float>::infinity();
size_t output_position = d_path_segments[idx + 1] - 1;
while (!child.IsRoot()) {
size_t parent_idx = tree_offset + child.Parent();
double child_cover = d_stats[child_idx].sum_hess;
double parent_cover = d_stats[parent_idx].sum_hess;
double zero_fraction = child_cover / parent_cover;
auto parent = tree.d_tree[child.Parent()];
bool is_left_path = (tree_offset + parent.LeftChild()) == child_idx;
bool is_missing_path = (!parent.DefaultLeft() && !is_left_path) ||
(parent.DefaultLeft() && is_left_path);
float lower_bound = -inf;
float upper_bound = inf;
common::CatBitField bits;
if (common::IsCat(tree.cats.split_type, child.Parent())) {
auto path_cats = d_path_categories.subspan(max_cat * output_position, max_cat);
size_t size = tree.cats.node_ptr[child.Parent()].size;
auto node_cats = tree.cats.categories.subspan(tree.cats.node_ptr[child.Parent()].beg, size);
SPAN_CHECK(path_cats.size() >= node_cats.size());
for (size_t i = 0; i < node_cats.size(); ++i) {
path_cats[i] = is_left_path ? ~node_cats[i] : node_cats[i];
}
bits = common::CatBitField{path_cats};
} else {
lower_bound = is_left_path ? -inf : parent.SplitCond();
upper_bound = is_left_path ? parent.SplitCond() : inf;
}
d_paths[output_position--] =
gpu_treeshap::PathElement<ShapSplitCondition>{
idx, parent.SplitIndex(),
group, ShapSplitCondition{lower_bound, upper_bound, is_missing_path, bits},
zero_fraction, v};
child_idx = parent_idx;
child = parent;
}
// Root node has feature -1
d_paths[output_position] = {idx, -1, group, ShapSplitCondition{-inf, inf, false, {}}, 1.0, v};
});
}
namespace {
template <size_t kBlockThreads>
size_t SharedMemoryBytes(size_t cols, size_t max_shared_memory_bytes) {
  // max_shared_memory_bytes must never be 0 here.
CHECK_GT(max_shared_memory_bytes, 0);
size_t shared_memory_bytes =
static_cast<size_t>(sizeof(float) * cols * kBlockThreads);
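  // For example (hypothetical sizes): 100 features with kBlockThreads = 128 requests
  // 100 * 128 * 4 bytes = 50 KiB, which exceeds a typical 48 KiB per-block limit, so
  // shared_memory_bytes is set to 0 and the kernels fall back to global memory reads.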
if (shared_memory_bytes > max_shared_memory_bytes) {
shared_memory_bytes = 0;
}
return shared_memory_bytes;
}
} // anonymous namespace
class GPUPredictor : public xgboost::Predictor {
private:
void PredictInternal(const SparsePage& batch,
DeviceModel const& model,
size_t num_features,
HostDeviceVector<bst_float>* predictions,
size_t batch_offset, bool is_dense) const {
batch.offset.SetDevice(ctx_->gpu_id);
batch.data.SetDevice(ctx_->gpu_id);
const uint32_t BLOCK_THREADS = 128;
size_t num_rows = batch.Size();
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(num_features, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
SparsePageView data(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
num_features);
auto const kernel = [&](auto predict_fn) {
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
predict_fn, data, model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(),
model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
num_features, num_rows, entry_start, use_shared, model.num_group,
nan(""));
};
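    // Dense pages cannot contain missing values, so the has_missing = false
    // specialization (third template argument) skips the per-element NaN checks.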
if (is_dense) {
kernel(PredictKernel<SparsePageLoader, SparsePageView, false>);
} else {
kernel(PredictKernel<SparsePageLoader, SparsePageView, true>);
}
}
void PredictInternal(EllpackDeviceAccessor const& batch,
DeviceModel const& model,
HostDeviceVector<bst_float>* out_preds,
size_t batch_offset) const {
const uint32_t BLOCK_THREADS = 256;
size_t num_rows = batch.n_rows;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS));
DeviceModel d_model;
bool use_shared = false;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} (
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch,
model.nodes.ConstDeviceSpan(), out_preds->DeviceSpan().subspan(batch_offset),
model.tree_segments.ConstDeviceSpan(), model.tree_group.ConstDeviceSpan(),
model.split_types.ConstDeviceSpan(),
model.categories_tree_segments.ConstDeviceSpan(),
model.categories_node_segments.ConstDeviceSpan(),
model.categories.ConstDeviceSpan(), model.tree_beg_, model.tree_end_,
batch.NumFeatures(), num_rows, entry_start, use_shared,
model.num_group, nan(""));
}
void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds,
const gbm::GBTreeModel& model, size_t tree_begin,
size_t tree_end) const {
if (tree_end - tree_begin == 0) {
return;
}
out_preds->SetDevice(ctx_->gpu_id);
auto const& info = dmat->Info();
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, ctx_->gpu_id);
if (dmat->PageExists<SparsePage>()) {
size_t batch_offset = 0;
for (auto &batch : dmat->GetBatches<SparsePage>()) {
this->PredictInternal(batch, d_model, model.learner_model_param->num_feature,
out_preds, batch_offset, dmat->IsDense());
batch_offset += batch.Size() * model.learner_model_param->num_output_group;
}
} else {
size_t batch_offset = 0;
for (auto const& page : dmat->GetBatches<EllpackPage>(BatchParam{})) {
dmat->Info().feature_types.SetDevice(ctx_->gpu_id);
auto feature_types = dmat->Info().feature_types.ConstDeviceSpan();
this->PredictInternal(
page.Impl()->GetDeviceAccessor(ctx_->gpu_id, feature_types),
d_model,
out_preds,
batch_offset);
batch_offset += page.Impl()->n_rows;
}
}
}
public:
explicit GPUPredictor(GenericParameter const* generic_param) :
Predictor::Predictor{generic_param} {}
~GPUPredictor() override {
if (ctx_->gpu_id >= 0 && ctx_->gpu_id < common::AllVisibleGPUs()) {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
}
}
void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts,
const gbm::GBTreeModel& model, uint32_t tree_begin,
uint32_t tree_end = 0) const override {
int device = ctx_->gpu_id;
    CHECK_GE(device, 0) << "Set `gpu_id' to a non-negative value to process data on the GPU.";
auto* out_preds = &predts->predictions;
if (tree_end == 0) {
tree_end = model.trees.size();
}
this->DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
}
template <typename Adapter, typename Loader>
void DispatchedInplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds,
uint32_t tree_begin, uint32_t tree_end) const {
uint32_t const output_groups = model.learner_model_param->num_output_group;
auto m = dmlc::get<std::shared_ptr<Adapter>>(x);
    CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature)
        << "Number of columns in data must equal the number of features in the trained model.";
CHECK_EQ(dh::CurrentDevice(), m->DeviceIdx())
<< "XGBoost is running on device: " << this->ctx_->gpu_id << ", "
<< "but data is on: " << m->DeviceIdx();
if (p_m) {
p_m->Info().num_row_ = m->NumRows();
this->InitOutPredictions(p_m->Info(), &(out_preds->predictions), model);
} else {
MetaInfo info;
info.num_row_ = m->NumRows();
this->InitOutPredictions(info, &(out_preds->predictions), model);
}
out_preds->predictions.SetDevice(m->DeviceIdx());
const uint32_t BLOCK_THREADS = 128;
auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(m->NumRows(), BLOCK_THREADS));
auto max_shared_memory_bytes = dh::MaxSharedMemory(m->DeviceIdx());
size_t shared_memory_bytes =
SharedMemoryBytes<BLOCK_THREADS>(m->NumColumns(), max_shared_memory_bytes);
DeviceModel d_model;
d_model.Init(model, tree_begin, tree_end, m->DeviceIdx());
bool use_shared = shared_memory_bytes != 0;
size_t entry_start = 0;
dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} (
PredictKernel<Loader, typename Loader::BatchT>, m->Value(),
d_model.nodes.ConstDeviceSpan(), out_preds->predictions.DeviceSpan(),
d_model.tree_segments.ConstDeviceSpan(), d_model.tree_group.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(), tree_begin, tree_end, m->NumColumns(),
m->NumRows(), entry_start, use_shared, output_groups, missing);
}
bool InplacePredict(dmlc::any const &x, std::shared_ptr<DMatrix> p_m,
const gbm::GBTreeModel &model, float missing,
PredictionCacheEntry *out_preds, uint32_t tree_begin,
unsigned tree_end) const override {
if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) {
this->DispatchedInplacePredict<
data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) {
this->DispatchedInplacePredict<
data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>(
x, p_m, model, missing, out_preds, tree_begin, tree_end);
} else {
return false;
}
return true;
}
void PredictContribution(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model, unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate, int,
unsigned) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShap<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
phis[(idx + 1) * contributions_columns - 1] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInteractionContributions(DMatrix* p_fmat,
HostDeviceVector<bst_float>* out_contribs,
const gbm::GBTreeModel& model,
unsigned tree_end,
std::vector<bst_float> const* tree_weights,
bool approximate) const override {
std::string not_implemented{"contribution is not implemented in GPU "
"predictor, use `cpu_predictor` instead."};
if (approximate) {
LOG(FATAL) << "Approximated " << not_implemented;
}
if (tree_weights != nullptr) {
LOG(FATAL) << "Dart booster feature " << not_implemented;
}
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
out_contribs->SetDevice(ctx_->gpu_id);
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
const int ngroup = model.learner_model_param->num_output_group;
CHECK_NE(ngroup, 0);
// allocate space for (number of features + bias) times the number of rows
size_t contributions_columns =
model.learner_model_param->num_feature + 1; // +1 for bias
out_contribs->Resize(p_fmat->Info().num_row_ * contributions_columns *
contributions_columns *
model.learner_model_param->num_output_group);
out_contribs->Fill(0.0f);
auto phis = out_contribs->DeviceSpan();
dh::device_vector<gpu_treeshap::PathElement<ShapSplitCondition>>
device_paths;
DeviceModel d_model;
d_model.Init(model, 0, tree_end, ctx_->gpu_id);
dh::device_vector<uint32_t> categories;
ExtractPaths(&device_paths, &d_model, &categories, ctx_->gpu_id);
for (auto& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
SparsePageView X(batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature);
auto begin = dh::tbegin(phis) + batch.base_rowid * contributions_columns;
gpu_treeshap::GPUTreeShapInteractions<dh::XGBDeviceAllocator<int>>(
X, device_paths.begin(), device_paths.end(), ngroup, begin,
dh::tend(phis));
}
// Add the base margin term to last column
p_fmat->Info().base_margin_.SetDevice(ctx_->gpu_id);
const auto margin = p_fmat->Info().base_margin_.Data()->ConstDeviceSpan();
float base_score = model.learner_model_param->base_score;
size_t n_features = model.learner_model_param->num_feature;
dh::LaunchN(
p_fmat->Info().num_row_ * model.learner_model_param->num_output_group,
[=] __device__(size_t idx) {
size_t group = idx % ngroup;
size_t row_idx = idx / ngroup;
phis[gpu_treeshap::IndexPhiInteractions(
row_idx, ngroup, group, n_features, n_features, n_features)] +=
margin.empty() ? base_score : margin[idx];
});
}
void PredictInstance(const SparsePage::Inst&,
std::vector<bst_float>*,
const gbm::GBTreeModel&, unsigned) const override {
LOG(FATAL) << "[Internal error]: " << __func__
<< " is not implemented in GPU Predictor.";
}
void PredictLeaf(DMatrix *p_fmat, HostDeviceVector<bst_float> *predictions,
const gbm::GBTreeModel &model,
unsigned tree_end) const override {
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
auto max_shared_memory_bytes = ConfigureDevice(ctx_->gpu_id);
const MetaInfo& info = p_fmat->Info();
constexpr uint32_t kBlockThreads = 128;
size_t shared_memory_bytes = SharedMemoryBytes<kBlockThreads>(
info.num_col_, max_shared_memory_bytes);
bool use_shared = shared_memory_bytes != 0;
bst_feature_t num_features = info.num_col_;
bst_row_t num_rows = info.num_row_;
size_t entry_start = 0;
if (tree_end == 0 || tree_end > model.trees.size()) {
tree_end = static_cast<uint32_t>(model.trees.size());
}
predictions->SetDevice(ctx_->gpu_id);
predictions->Resize(num_rows * tree_end);
DeviceModel d_model;
d_model.Init(model, 0, tree_end, this->ctx_->gpu_id);
if (p_fmat->PageExists<SparsePage>()) {
for (auto const& batch : p_fmat->GetBatches<SparsePage>()) {
batch.data.SetDevice(ctx_->gpu_id);
batch.offset.SetDevice(ctx_->gpu_id);
bst_row_t batch_offset = 0;
SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan(),
model.learner_model_param->num_feature};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<SparsePageLoader, SparsePageView>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(BatchParam{})) {
bst_row_t batch_offset = 0;
EllpackDeviceAccessor data{batch.Impl()->GetDeviceAccessor(ctx_->gpu_id)};
size_t num_rows = batch.Size();
auto grid =
static_cast<uint32_t>(common::DivRoundUp(num_rows, kBlockThreads));
dh::LaunchKernel {grid, kBlockThreads, shared_memory_bytes} (
PredictLeafKernel<EllpackLoader, EllpackDeviceAccessor>, data,
d_model.nodes.ConstDeviceSpan(),
predictions->DeviceSpan().subspan(batch_offset),
d_model.tree_segments.ConstDeviceSpan(),
d_model.split_types.ConstDeviceSpan(),
d_model.categories_tree_segments.ConstDeviceSpan(),
d_model.categories_node_segments.ConstDeviceSpan(),
d_model.categories.ConstDeviceSpan(),
d_model.tree_beg_, d_model.tree_end_, num_features, num_rows,
entry_start, use_shared, nan(""));
batch_offset += batch.Size();
}
}
}
void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override {
Predictor::Configure(cfg);
}
private:
/*! \brief Reconfigure the device when GPU is changed. */
static size_t ConfigureDevice(int device) {
if (device >= 0) {
return dh::MaxSharedMemory(device);
}
return 0;
}
};
XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
.describe("Make predictions using GPU.")
.set_body([](GenericParameter const* generic_param) {
return new GPUPredictor(generic_param);
});
} // namespace predictor
} // namespace xgboost
|
sgeaxpy.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgeaxpy.cu, normal z -> s, Sun Nov 20 20:20:39 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgeaxpy_kernel(
int num_rows,
int num_cols,
float alpha,
float * dx,
float beta,
float * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is a dense matrix (vector block) stored in
magma_s_matrix format.
Arguments
---------
@param[in]
alpha float
scalar multiplier.
@param[in]
X magma_s_matrix
                input matrix X.
@param[in]
beta float
scalar multiplier.
@param[in,out]
Y magma_s_matrix*
                input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
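// Usage sketch (hypothetical, not part of the original source): assuming X and Y are
// initialized dense magma_s_matrix objects of matching size residing on the device,
//     magma_sgeaxpy( 2.0f, X, 1.0f, &Y, queue );   // Y <- 2*X + Y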
extern "C"
magma_int_t
magma_sgeaxpy(
float alpha,
magma_s_matrix X,
float beta,
magma_s_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, X.dval, beta, Y->dval );
return MAGMA_SUCCESS;
}
| sgeaxpy.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgeaxpy.cu, normal z -> s, Sun Nov 20 20:20:39 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgeaxpy_kernel(
int num_rows,
int num_cols,
float alpha,
float * dx,
float beta,
float * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is a dense matrix (vector block) stored in
magma_s_matrix format.
Arguments
---------
@param[in]
alpha float
scalar multiplier.
@param[in]
X magma_s_matrix
                input matrix X.
@param[in]
beta float
scalar multiplier.
@param[in,out]
Y magma_s_matrix*
                input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
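// Usage sketch (hypothetical, not part of the original source): assuming X and Y are
// initialized dense magma_s_matrix objects of matching size residing on the device,
//     magma_sgeaxpy( 2.0f, X, 1.0f, &Y, queue );   // Y <- 2*X + Y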
extern "C"
magma_int_t
magma_sgeaxpy(
float alpha,
magma_s_matrix X,
float beta,
magma_s_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, X.dval, beta, Y->dval );
return MAGMA_SUCCESS;
}
|
6a1608033a5aa5c5e99dd18cb1aaa172547172ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <Windows.h>
#include <iostream>
#include <ctime>
const int SIZE_OF_BLOCK = 16;
void Error(hipError_t err) {
if (err != hipSuccess) {
printf("%s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void kernel() {
}
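// Tiled matrix multiply C = A * B for square N x N matrices: each block computes one
// SIZE_OF_BLOCK x SIZE_OF_BLOCK tile of C, streaming matching tiles of A and B through
// shared memory. As written, correctness assumes N is a multiple of SIZE_OF_BLOCK.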
__global__ void matMult(int *A, int *B, int *C, int N) {
int A_begin = N * SIZE_OF_BLOCK;
A_begin = A_begin * blockIdx.y;
int A_end = A_begin + N - 1;
int B_begin = SIZE_OF_BLOCK;
B_begin = B_begin * blockIdx.x;
int A_step = SIZE_OF_BLOCK;
int B_step = SIZE_OF_BLOCK * N;
int sum = 0;
/*if(blockIdx.x == 1 && blockIdx.y == 1)
printf("A begin is %d, A end is %d, B begin is %d\n", A_beg, A_end, B_beg);*/
__shared__ int A_shared[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
__shared__ int B_shared[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
for (int i_A = A_begin, i_B = B_begin; i_A <= A_end; i_A += A_step, i_B += B_step) {
A_shared[threadIdx.y][threadIdx.x] = A[i_A + N * threadIdx.y + threadIdx.x];
B_shared[threadIdx.y][threadIdx.x] = B[i_B + N * threadIdx.y + threadIdx.x];
__syncthreads();
for (int k = 0; k < SIZE_OF_BLOCK; k++) {
sum += A_shared[threadIdx.y][k] * B_shared[k][threadIdx.x];
}
__syncthreads();
C[N * SIZE_OF_BLOCK * blockIdx.y + SIZE_OF_BLOCK * blockIdx.x + N * threadIdx.y + threadIdx.x] = sum;
}
}
int main() {
srand(time(0));
int N = 1000;
std::cout << "Size " << N << std::endl;
dim3 block(N / SIZE_OF_BLOCK, N / SIZE_OF_BLOCK);
dim3 threads(SIZE_OF_BLOCK, SIZE_OF_BLOCK);
int *dev_c, *dev_a, *dev_b;
int *a, *b, *c;
a = new int[N * N];
b = new int[N * N];
c = new int[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
a[i*N + j] = rand() % 10;
b[i*N + j] = rand() % 10;
}
}
/*std::cout << "A" << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << a[i*N + j] << " ";
}
std::cout << std::endl;
}
std::cout << "B" << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << b[i*N + j] << " ";
}
std::cout << std::endl;
}*/
kernel << <block, threads >> > ();
int time = GetTickCount();
Error(hipMalloc((void **)&dev_c, sizeof(int) * N * N));
Error(hipMalloc((void **)&dev_b, sizeof(int) * N * N));
Error(hipMalloc((void **)&dev_a, sizeof(int) * N * N));
Error(hipMemcpy(dev_a, a, sizeof(int) * N * N, hipMemcpyHostToDevice));
Error(hipMemcpy(dev_b, b, sizeof(int) * N * N, hipMemcpyHostToDevice));
//kernel << <block, threads >> > (dev_a, dev_b, dev_c, N);
matMult << <block, threads >> > (dev_a, dev_b, dev_c, N);
Error(hipGetLastError());
Error(hipMemcpy(c, dev_c, sizeof(int) * N * N, hipMemcpyDeviceToHost));
std::cout << "Time: " << GetTickCount() - time << std::endl;
/*std::cout << "C" << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << c[i*N + j] << " ";
}
std::cout << std::endl;
}*/
Error(hipFree(dev_c));
Error(hipFree(dev_b));
Error(hipFree(dev_a));
system("pause");
return 0;
}
| 6a1608033a5aa5c5e99dd18cb1aaa172547172ba.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <Windows.h>
#include <iostream>
#include <ctime>
const int SIZE_OF_BLOCK = 16;
void Error(cudaError_t err) {
if (err != cudaSuccess) {
printf("%s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void kernel() {
}
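// Tiled matrix multiply C = A * B for square N x N matrices: each block computes one
// SIZE_OF_BLOCK x SIZE_OF_BLOCK tile of C, streaming matching tiles of A and B through
// shared memory. As written, correctness assumes N is a multiple of SIZE_OF_BLOCK.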
__global__ void matMult(int *A, int *B, int *C, int N) {
int A_begin = N * SIZE_OF_BLOCK;
A_begin = A_begin * blockIdx.y;
int A_end = A_begin + N - 1;
int B_begin = SIZE_OF_BLOCK;
B_begin = B_begin * blockIdx.x;
int A_step = SIZE_OF_BLOCK;
int B_step = SIZE_OF_BLOCK * N;
int sum = 0;
/*if(blockIdx.x == 1 && blockIdx.y == 1)
printf("A begin is %d, A end is %d, B begin is %d\n", A_beg, A_end, B_beg);*/
__shared__ int A_shared[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
__shared__ int B_shared[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
for (int i_A = A_begin, i_B = B_begin; i_A <= A_end; i_A += A_step, i_B += B_step) {
A_shared[threadIdx.y][threadIdx.x] = A[i_A + N * threadIdx.y + threadIdx.x];
B_shared[threadIdx.y][threadIdx.x] = B[i_B + N * threadIdx.y + threadIdx.x];
__syncthreads();
for (int k = 0; k < SIZE_OF_BLOCK; k++) {
sum += A_shared[threadIdx.y][k] * B_shared[k][threadIdx.x];
}
__syncthreads();
C[N * SIZE_OF_BLOCK * blockIdx.y + SIZE_OF_BLOCK * blockIdx.x + N * threadIdx.y + threadIdx.x] = sum;
}
}
int main() {
srand(time(0));
int N = 1000;
std::cout << "Size " << N << std::endl;
dim3 block(N / SIZE_OF_BLOCK, N / SIZE_OF_BLOCK);
dim3 threads(SIZE_OF_BLOCK, SIZE_OF_BLOCK);
int *dev_c, *dev_a, *dev_b;
int *a, *b, *c;
a = new int[N * N];
b = new int[N * N];
c = new int[N * N];
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
a[i*N + j] = rand() % 10;
b[i*N + j] = rand() % 10;
}
}
/*std::cout << "A" << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << a[i*N + j] << " ";
}
std::cout << std::endl;
}
std::cout << "B" << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << b[i*N + j] << " ";
}
std::cout << std::endl;
}*/
kernel << <block, threads >> > ();
int time = GetTickCount();
Error(cudaMalloc((void **)&dev_c, sizeof(int) * N * N));
Error(cudaMalloc((void **)&dev_b, sizeof(int) * N * N));
Error(cudaMalloc((void **)&dev_a, sizeof(int) * N * N));
Error(cudaMemcpy(dev_a, a, sizeof(int) * N * N, cudaMemcpyHostToDevice));
Error(cudaMemcpy(dev_b, b, sizeof(int) * N * N, cudaMemcpyHostToDevice));
//kernel << <block, threads >> > (dev_a, dev_b, dev_c, N);
matMult << <block, threads >> > (dev_a, dev_b, dev_c, N);
Error(cudaGetLastError());
Error(cudaMemcpy(c, dev_c, sizeof(int) * N * N, cudaMemcpyDeviceToHost));
std::cout << "Time: " << GetTickCount() - time << std::endl;
/*std::cout << "C" << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << c[i*N + j] << " ";
}
std::cout << std::endl;
}*/
Error(cudaFree(dev_c));
Error(cudaFree(dev_b));
Error(cudaFree(dev_a));
system("pause");
return 0;
}
|
53cdf2da679363734a6408574aa3c96a19a8893d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2021-2023, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "cdtp_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
#include <common/neko_log.h>
}
template < const int >
int tune_cdtp(void *dtx, void *x,
void *dr, void *ds, void *dt,
void *dxt, void *dyt, void *dzt,
void *B, void *jac, int *nel, int *lx);
extern "C" {
/**
* Fortran wrapper for device cuda \f$ D^T X \f$
*/
void cuda_cdtp(void *dtx, void *x,
void *dr, void *ds, void *dt,
void *dxt, void *dyt, void *dzt,
void *B, void *jac, int *nel, int *lx) {
static int autotune[17] = { 0 };
const dim3 nthrds_1d(1024, 1, 1);
const dim3 nthrds_kstep((*lx), (*lx), 1);
const dim3 nblcks((*nel), 1, 1);
const hipStream_t stream = (hipStream_t) glb_cmd_queue;
#define CASE_1D(LX) \
hipLaunchKernelGGL(( cdtp_kernel_1d<real, LX, 1024>) \
, dim3(nblcks), dim3(nthrds_1d), 0, stream, (real *) dtx, (real *) x, \
(real *) dr, (real *) ds, (real *) dt, \
(real *) dxt, (real *) dyt, (real *) dzt, \
(real *) B, (real *) jac); \
CUDA_CHECK(hipGetLastError());
#define CASE_KSTEP(LX) \
hipLaunchKernelGGL(( cdtp_kernel_kstep<real, LX>) \
, dim3(nblcks), dim3(nthrds_kstep), 0, stream, (real *) dtx, (real *) x, \
(real *) dr, (real *) ds, (real *) dt, \
(real *) dxt, (real *) dyt, (real *) dzt, \
(real *) B, (real *) jac); \
CUDA_CHECK(hipGetLastError());
#define CASE(LX) \
case LX: \
if(autotune[LX] == 0 ) { \
autotune[LX]=tune_cdtp<LX>(dtx, x, \
dr, ds, dt, \
dxt, dyt, dzt, \
B, jac, nel, lx); \
} else if (autotune[LX] == 1 ) { \
CASE_1D(LX); \
} else if (autotune[LX] == 2 ) { \
CASE_KSTEP(LX); \
} \
break
#define CASE_LARGE(LX) \
case LX: \
CASE_KSTEP(LX); \
break
if ((*lx) < 13) {
switch(*lx) {
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
CASE(7);
CASE(8);
CASE(9);
CASE(10);
CASE(11);
CASE(12);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
}
else {
switch(*lx) {
CASE_LARGE(13);
CASE_LARGE(14);
CASE_LARGE(15);
CASE_LARGE(16);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
}
}
}
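/**
 * Pick between the 1D and k-step kernels for a given polynomial order LX: unless the
 * NEKO_AUTOTUNE environment variable forces a choice, time 100 launches of each variant
 * with timing events and return 1 (1D) or 2 (KSTEP) for whichever is faster.
 */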
template < const int LX >
int tune_cdtp(void *dtx, void *x,
void *dr, void *ds, void *dt,
void *dxt, void *dyt, void *dzt,
void *B, void *jac, int *nel, int *lx) {
hipEvent_t start,stop;
float time1,time2;
int retval;
const dim3 nthrds_1d(1024, 1, 1);
const dim3 nthrds_kstep((*lx), (*lx), 1);
const dim3 nblcks((*nel), 1, 1);
const hipStream_t stream = (hipStream_t) glb_cmd_queue;
char *env_value = NULL;
char neko_log_buf[80];
env_value=getenv("NEKO_AUTOTUNE");
sprintf(neko_log_buf, "Autotune cdtp (lx: %d)", *lx);
log_section(neko_log_buf);
if(env_value) {
if( !strcmp(env_value,"1D") ) {
CASE_1D(LX);
sprintf(neko_log_buf,"Set by env : 1 (1D)");
log_message(neko_log_buf);
log_end_section();
return 1;
} else if( !strcmp(env_value,"KSTEP") ) {
CASE_KSTEP(LX);
sprintf(neko_log_buf,"Set by env : 2 (KSTEP)");
log_message(neko_log_buf);
log_end_section();
return 2;
} else {
sprintf(neko_log_buf, "Invalid value set for NEKO_AUTOTUNE");
log_error(neko_log_buf);
}
}
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
for(int i = 0; i < 100; i++) {
CASE_1D(LX);
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time1, start, stop);
hipEventRecord(start,0);
for(int i = 0; i < 100; i++) {
CASE_KSTEP(LX);
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time2, start, stop);
if(time1 < time2) {
retval = 1;
} else {
retval = 2;
}
sprintf(neko_log_buf, "Chose : %d (%s)", retval,
(retval > 1 ? "KSTEP" : "1D"));
log_message(neko_log_buf);
log_end_section();
return retval;
}
| 53cdf2da679363734a6408574aa3c96a19a8893d.cu | /*
Copyright (c) 2021-2023, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "cdtp_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
#include <common/neko_log.h>
}
template < const int >
int tune_cdtp(void *dtx, void *x,
void *dr, void *ds, void *dt,
void *dxt, void *dyt, void *dzt,
void *B, void *jac, int *nel, int *lx);
extern "C" {
/**
* Fortran wrapper for device cuda \f$ D^T X \f$
*/
void cuda_cdtp(void *dtx, void *x,
void *dr, void *ds, void *dt,
void *dxt, void *dyt, void *dzt,
void *B, void *jac, int *nel, int *lx) {
static int autotune[17] = { 0 };
const dim3 nthrds_1d(1024, 1, 1);
const dim3 nthrds_kstep((*lx), (*lx), 1);
const dim3 nblcks((*nel), 1, 1);
const cudaStream_t stream = (cudaStream_t) glb_cmd_queue;
#define CASE_1D(LX) \
cdtp_kernel_1d<real, LX, 1024> \
<<<nblcks, nthrds_1d, 0, stream>>>((real *) dtx, (real *) x, \
(real *) dr, (real *) ds, (real *) dt, \
(real *) dxt, (real *) dyt, (real *) dzt, \
(real *) B, (real *) jac); \
CUDA_CHECK(cudaGetLastError());
#define CASE_KSTEP(LX) \
cdtp_kernel_kstep<real, LX> \
<<<nblcks, nthrds_kstep, 0, stream>>>((real *) dtx, (real *) x, \
(real *) dr, (real *) ds, (real *) dt, \
(real *) dxt, (real *) dyt, (real *) dzt, \
(real *) B, (real *) jac); \
CUDA_CHECK(cudaGetLastError());
#define CASE(LX) \
case LX: \
if(autotune[LX] == 0 ) { \
autotune[LX]=tune_cdtp<LX>(dtx, x, \
dr, ds, dt, \
dxt, dyt, dzt, \
B, jac, nel, lx); \
} else if (autotune[LX] == 1 ) { \
CASE_1D(LX); \
} else if (autotune[LX] == 2 ) { \
CASE_KSTEP(LX); \
} \
break
#define CASE_LARGE(LX) \
case LX: \
CASE_KSTEP(LX); \
break
if ((*lx) < 13) {
switch(*lx) {
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
CASE(7);
CASE(8);
CASE(9);
CASE(10);
CASE(11);
CASE(12);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
}
else {
switch(*lx) {
CASE_LARGE(13);
CASE_LARGE(14);
CASE_LARGE(15);
CASE_LARGE(16);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
}
}
}
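/**
 * Pick between the 1D and k-step kernels for a given polynomial order LX: unless the
 * NEKO_AUTOTUNE environment variable forces a choice, time 100 launches of each variant
 * with timing events and return 1 (1D) or 2 (KSTEP) for whichever is faster.
 */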
template < const int LX >
int tune_cdtp(void *dtx, void *x,
void *dr, void *ds, void *dt,
void *dxt, void *dyt, void *dzt,
void *B, void *jac, int *nel, int *lx) {
cudaEvent_t start,stop;
float time1,time2;
int retval;
const dim3 nthrds_1d(1024, 1, 1);
const dim3 nthrds_kstep((*lx), (*lx), 1);
const dim3 nblcks((*nel), 1, 1);
const cudaStream_t stream = (cudaStream_t) glb_cmd_queue;
char *env_value = NULL;
char neko_log_buf[80];
env_value=getenv("NEKO_AUTOTUNE");
sprintf(neko_log_buf, "Autotune cdtp (lx: %d)", *lx);
log_section(neko_log_buf);
if(env_value) {
if( !strcmp(env_value,"1D") ) {
CASE_1D(LX);
sprintf(neko_log_buf,"Set by env : 1 (1D)");
log_message(neko_log_buf);
log_end_section();
return 1;
} else if( !strcmp(env_value,"KSTEP") ) {
CASE_KSTEP(LX);
sprintf(neko_log_buf,"Set by env : 2 (KSTEP)");
log_message(neko_log_buf);
log_end_section();
return 2;
} else {
sprintf(neko_log_buf, "Invalid value set for NEKO_AUTOTUNE");
log_error(neko_log_buf);
}
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
for(int i = 0; i < 100; i++) {
CASE_1D(LX);
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time1, start, stop);
cudaEventRecord(start,0);
for(int i = 0; i < 100; i++) {
CASE_KSTEP(LX);
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time2, start, stop);
if(time1 < time2) {
retval = 1;
} else {
retval = 2;
}
sprintf(neko_log_buf, "Chose : %d (%s)", retval,
(retval > 1 ? "KSTEP" : "1D"));
log_message(neko_log_buf);
log_end_section();
return retval;
}
|
275edd6326209a2bbe6adc76da70f904de022c89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <string.h>
#include <sys/resource.h>
#include <time.h>
#include <sys/time.h>
#define ERROR -1
#define LENGTH_ARRAY 100000
//#define TOPK 32
#define TAM_WARP 32
#define N_BLOQUES 3
#define T_per_BLOCK 512
// Global variables
int DIM,N_QUERIES,N_DB,TOPK;
FILE *Salida_Multihilo;
struct _Elem{
float dist;
int ind;
};
typedef struct _Elem Elem;
void copiavalor_DB(double **a, double *b, int j);
int leedato_DB(double *dato, FILE *file);
void copiavalor_QUERIES(double **a, double *b, int j);
int leedato_QUERIES(double *dato, FILE *file);
__device__ void pushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id);
__device__ void popH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult);
__device__ float topH(Elem *heap, int id);
__device__ void popushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id);
__global__ void Batch_Heap_Reduction(Elem *heap, int pitch_H, Elem *arr_Dist, int pitch_Dist, Elem *res_final);
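// Intended computation (per the surrounding host code): each block stages one query
// vector in shared memory and its threads accumulate squared differences against the
// database columns, storing the resulting Euclidean distances in arr_Dist.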
__global__ void distancias(double *CudaConsultas, size_t pitch_Consultas, double *CudaDB, size_t pitch_DB, double *arr_Dist, int pitch_Dist){
int col, row;
double d = 0, resultado=0;
   //Shared memory of at most 49 KB
__shared__ double query[N_QUERIES];
if (threadIdx.x < N_QUERIES)
query[threadIdx.x] = ((double *)((char *)CudaConsultas + threadIdx.x*(int)pitch_Consultas))[blockIdx.x];
__syncthreads();
for (col=threadIdx.x; col < N_QUERIES; col += blockDim.x){
d=0;
for (row=0; row < N_DB; row++){
resultado = query[row] - ((double *)((char *)CudaDB + row*(int)pitch_DB))[col];
d += (resultado * resultado);
}
((double *)((char *)arr_Dist + row*(int)pitch_Dist))[col] = sqrt(d);
}
}
int main(int argc, char *argv[]){
int i, j;
char *ruta_db, *ruta_queries;
double **DB, **Consultas,**DB_in, **Consultas_in,*CudaDB, *CudaConsultas;
size_t pitch_DB, pitch_Consultas;
if (argc != 7){
printf("Error :: Ejecutar como : main.out archivo_BD Num_elem archivo_queries Num_queries dim TOPK\n");
return 1;
}
ruta_db = (char *)malloc(sizeof(char)*(strlen(argv[1])+1));
strcpy(ruta_db, argv[1]);
N_DB = atoi(argv[2]);
TOPK = atoi(argv[6]);
printf("%s\n",ruta_db );
ruta_queries = (char *)malloc(sizeof(char)*(strlen(argv[3])+1));
strcpy(ruta_queries, argv[3]);
N_QUERIES = atoi(argv[4]);
//printf("N_QUERIES:: -> :: %d\n",N_QUERIES );
DIM = atoi(argv[5]);
//printf("dim:: %d\n",DIM );
Elem *res_final, *res_final_H;
Elem *HEAPS_dev, *arr_Dist, **arr_Dist_H;
size_t pitch_H, pitch_Dist;
//Variable for time:
struct rusage r1, r2;
float user_time, sys_time, real_time;
struct timeval t1, t2;
FILE *f_dist, *fquery;
double datos_DB[DIM],datos_QUERIES[DIM];
if (hipSuccess != hipMallocPitch((void **)&CudaDB, &pitch_DB, N_DB * sizeof(double), DIM)){
printf("\nERROR :: hipMallocPitch :: CudaDB\n");
hipDeviceReset();
return 0;
}
if (hipSuccess != hipMallocPitch((void **)&CudaConsultas, &pitch_Consultas, N_QUERIES * sizeof(double), DIM)){
printf("\nERROR :: hipMallocPitch :: CudaConsultas\n");
hipDeviceReset();
return 0;
}
printf("\nAbriendo %s... ", argv[1]);
fflush(stdout);
f_dist = fopen(ruta_db, "r");
printf("OK\n");
fflush(stdout);
// Consultas = (double **) malloc(sizeof (double *)*N_QUERIES);
// for (i = 0; i < N_QUERIES; i++)
// Consultas[i] = (double *) malloc(sizeof (double)*DIM);
// DB = (double **) malloc(sizeof (double *)*N_DB);
// for (i = 0; i < N_DB; i++)
// DB[i] = (double *) malloc(sizeof (double)*DIM);
   //DIM:: rows
   //N_QUERIES and N_DB:: columns
   //MEMORY FOR THE MATRICES
Consultas_in = (double **) malloc(sizeof (double *)*DIM);
for (i = 0; i < DIM; i++)
Consultas_in[i] = (double *) malloc(sizeof (double)*N_QUERIES);
DB_in = (double **) malloc(sizeof (double *)*DIM);
for (i = 0; i < DIM; i++)
DB_in[i] = (double *) malloc(sizeof (double)*N_DB);
   //END OF MATRIX ALLOCATION
   //READ THE DATABASE
for (i = 0; i < N_DB; i++) {
if (leedato_DB(datos_DB, f_dist) == ERROR || feof(f_dist)) {
printf("\n\nERROR :: N_DB mal establecido, Menos que las indicadas\n\n");
fflush(stdout);
fclose(f_dist);
break;
}
copiavalor_DB(DB_in, datos_DB,i);
}
fclose(f_dist);
printf("OK\n");
fflush(stdout);
   //END OF DATABASE READ
   //READ THE QUERIES
if ((fquery = fopen(ruta_queries, "r")) == NULL)
printf("Error al abrir para lectura el archivo de queries: %s\n", ruta_queries);
else
printf("Abriendo para lectura %s\n", ruta_queries);
printf("\nCargando Consultas... ");
fflush(stdout);
for (i = 0; i < N_QUERIES; i++) {
if (leedato_QUERIES(datos_QUERIES, fquery) == ERROR || feof(fquery)) {
printf("\n\nERROR :: N_QUERIES mal establecido, Menos queries que las indicadas\n\n");
fflush(stdout);
fclose(fquery);
break;
}
copiavalor_QUERIES(Consultas_in, datos_QUERIES,i);
}
fclose(fquery);
printf("OK\n");
fflush(stdout);
   //END OF QUERY READING
printf("\nLength of the arrays = %d" , LENGTH_ARRAY);
printf("\nTOPK = %d" , TOPK);
fflush(stdout);
printf("\nANTES\n\n");
for( i = 0; i < 10; i++ ){
for( j = 0; j < 10; j++ ){
printf("%lf ", Consultas_in[i][j]);
}
printf("\n");
}
hipMemcpy2D(CudaDB, pitch_DB, *DB_in, sizeof(double)*N_DB, sizeof(double)*N_DB, DIM, hipMemcpyHostToDevice);
hipMemcpy2D(CudaConsultas, pitch_Consultas, *Consultas_in, sizeof(double)*N_QUERIES, sizeof(double)*N_QUERIES, DIM, hipMemcpyHostToDevice);
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
hipLaunchKernelGGL(( distancias), dim3(N_QUERIES), dim3(N_DB), 0, 0, CudaConsultas, pitch_Consultas, CudaDB, pitch_DB, arr_Dist, pitch_Dist);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipMemcpy2D(*Consultas_in, sizeof(double)*N_QUERIES, CudaConsultas, pitch_Consultas, sizeof(double)*N_QUERIES, DIM, hipMemcpyDeviceToHost);
hipMemcpy2D(*DB_in, sizeof(double)*N_DB, CudaDB, pitch_DB, sizeof(double)*N_DB, DIM, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("----------------------> Prueba <-----------------------------\n");
printf("\n");
printf("\n");
printf("\n");
printf("\nDESPUES\n\n");
for( i = 0; i < 10; i++ ){
for( j = 0; j < 10; j++ ){
printf("%lf ", Consultas_in[i][j]);
}
printf("\n");
}
/*
for( i = 0; i < DIM; i++ ){
for( j = 0; j < N_QUERIES; j++ ){
printf("%lf -", Consultas_in[i][j]);
}
printf("\n");
}
printf("----------------------> Fin Prueba <-----------------------------\n");
if (hipSuccess != hipMalloc((void **)&res_final, sizeof(Elem)*TOPK*N_BLOQUES)){
printf("\nERROR 1 :: hipMalloc\n");
hipDeviceReset();
return 0;
}
res_final_H = (Elem *)malloc(sizeof(Elem)*TOPK*N_BLOQUES);
for (i=0; i<TOPK*N_BLOQUES; i++){
res_final_H[i].ind = 0;
res_final_H[i].dist = 0;
}
if (hipSuccess != hipMemset(res_final, 0, sizeof(Elem)*TOPK*N_BLOQUES)){
printf("\nERROR 2 :: hipMemset\n");
hipDeviceReset();
return 0;
}
if (hipSuccess != hipMallocPitch((void **)&HEAPS_dev, &pitch_H, sizeof(Elem)*N_BLOQUES*T_per_BLOCK, TOPK)){
printf("\nERROR 3 :: hipMallocPitch :: Heaps_dev col=%lld :: row=%d\n", (long long)(sizeof(Elem)*N_BLOQUES*T_per_BLOCK), TOPK);
hipDeviceReset();
return 0;
}*/
/*arr_Dist_H = (Elem **)malloc(sizeof(Elem *)*N_BLOQUES);
for (i=0; i<N_BLOQUES; i++)
arr_Dist_H[i] = (Elem *)malloc(sizeof(Elem)*LENGTH_ARRAY);*/
/*
for (i=0; i<N_BLOQUES; i++)
for (j=0; j<LENGTH_ARRAY; j++){
arr_Dist_H[i][j].ind = (LENGTH_ARRAY*i) + j; //Setting an ID
arr_Dist_H[i][j].dist = (float)(0.1*(float)((LENGTH_ARRAY * i) + j));
}
if (hipSuccess != hipMallocPitch((void **)&arr_Dist, &pitch_Dist, LENGTH_ARRAY*sizeof(Elem), N_BLOQUES)){
printf("\nERROR 4 :: hipMallocPitch\n");
hipDeviceReset();
return 0;
}
for (i=0; i < N_BLOQUES; i++)
if (hipSuccess != hipMemcpy((char *)arr_Dist + (i*(int)pitch_Dist), (Elem *)(arr_Dist_H[i]), sizeof(Elem)*LENGTH_ARRAY, hipMemcpyHostToDevice)){
printf("\nERROR 5 :: hipMemcpy\n");
hipDeviceReset();
return 0;
}
hipDeviceSynchronize();
hipDeviceSynchronize();
getrusage(RUSAGE_SELF, &r1);
gettimeofday(&t1, 0);
printf("\nN_BLOQUES = %d :: T_per_BLOCK = %d\n", N_BLOQUES, T_per_BLOCK);
fflush(stdout);
Batch_Heap_Reduction<<< N_BLOQUES, T_per_BLOCK>>> (HEAPS_dev, (int)pitch_H, arr_Dist, (int)pitch_Dist, res_final);
if (hipSuccess != hipMemcpy((Elem *)res_final_H, (Elem *)res_final, sizeof(Elem)*TOPK*N_BLOQUES, hipMemcpyDeviceToHost)){
printf("\nERROR 41 :: hipMemcpy :: iteraH\n");
hipDeviceReset();
return 0;
}
hipDeviceSynchronize();
hipDeviceSynchronize();
gettimeofday(&t2, 0);
getrusage(RUSAGE_SELF, &r2);
user_time = (r2.ru_utime.tv_sec - r1.ru_utime.tv_sec) + (r2.ru_utime.tv_usec - r1.ru_utime.tv_usec)/1000000.0;
sys_time = (r2.ru_stime.tv_sec - r1.ru_stime.tv_sec) + (r2.ru_stime.tv_usec - r1.ru_stime.tv_usec)/1000000.0;
real_time = (t2.tv_sec - t1.tv_sec) + (float)(t2.tv_usec - t1.tv_usec)/1000000;
printf("\nK = %d", TOPK);
printf("\nTiempo CPU = %f", user_time + sys_time);
printf("\nTiempo Real = %f", real_time);
fflush(stdout);
for (i=0; i<N_BLOQUES; i++){
printf("\n\nResults array %d (smallest distances):", i);
for (j=TOPK*i; j<(TOPK*i)+TOPK; j++)
printf("\nind = %d :: dist = %f", res_final_H[j].ind, res_final_H[j].dist);
}
printf("\n");
hipFree(HEAPS_dev);
hipFree(arr_Dist);
hipDeviceReset();
*/
return 0;
}
/*
//Push an element 'elem' to the id-th heap stored in the id-th column of the matrix 'heap'
__device__ void pushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id)
{
int i;
Elem temp;
((Elem *)((char *)heap + (*n_elem)*pitch))[id].dist = elem->dist;
((Elem *)((char *)heap + (*n_elem)*pitch))[id].ind = elem->ind;
(*n_elem)++;
for (i = *n_elem; i>1 && ((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist; i=i/2)
{
//Swap with the father
temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist;
temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind;
((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist;
((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind;
((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist = temp.dist;
((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind = temp.ind;
}
return;
}
//Pop an element from id-th heap stored in the id-th column of the matrix 'heap' and stores it in 'eresult'
__device__ void popH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult)
{
int i, k;
Elem temp;
eresult->dist = ((Elem *)((char *)heap+0))[id].dist;
eresult->ind = ((Elem *)((char *)heap+0))[id].ind;
((Elem *)((char *)heap+0))[id].dist = ((Elem *)((char *)heap + ((*n_elem)-1)*pitch))[id].dist;//Moving the last element to the root
((Elem *)((char *)heap+0))[id].ind = ((Elem *)((char *)heap + ((*n_elem)-1)*pitch))[id].ind;
(*n_elem)--;
i = 1;
while(2*i <= *n_elem) //while exists some child
{
k = 2*i; //left child
if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
k = k+1; //right child is the biggest
if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
break; //bigger than both children
temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist;
temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind;
((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist;
((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind;
((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist;
((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind;
i = k; //swap with the biggest child
}
return;
}
//Returns the root of the id-th heap (stored in the id-th column)
__device__ float topH(Elem *heap, int id) //NOTE: Be careful if the heap is empty and topH is called, it will give an error
{
return ((Elem *)((char *)heap + 0))[id].dist;
}
//Pop and push in one operation
__device__ void popushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id)
{
int i, k;
Elem temp;
((Elem *)((char *)heap+0))[id].dist = elem->dist;
((Elem *)((char *)heap+0))[id].ind = elem->ind;
i = 1;
while(2*i <= *n_elem) //while exists some child
{
k = 2*i; //left child
if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
k = k+1; //right child is the biggest
if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
break; //bigger than both children
temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist;
temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind;
((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist;
((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind;
((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist;
((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind;
i = k; //swap with the bigger child
}
return;
}
__global__ void Batch_Heap_Reduction(Elem *heap, int pitch_H, Elem *arr_Dist, int pitch_Dist, Elem *res_final)
{
int i, j, n_elem=0, n_elemWarp=0;
int id;
Elem eresult;
__shared__ Elem matrizWarp[TOPK][TAM_WARP];
__shared__ Elem heapfin[TOPK][1];
id = threadIdx.x + (blockDim.x * blockIdx.x);
//First Step: The array to be sorted (arr_Dist) is reduced to T_per_BLOCK heaps stored in device memory
//The elements of arr_Dist are distributed in a circular manner, therefore consecutive threads access consecutive elements (consecutive memory addresses)
for(i=threadIdx.x; i < LENGTH_ARRAY; i += blockDim.x)
{
if (n_elem >= TOPK)//If the current number of elements in the heap is >= TOPK (it is never actually greater than TOPK; at most it equals TOPK)
{
//The next if is to add an element to the heap just if that element is less than the head of the heap
if (topH(heap, id) > ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].dist)
popushH(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id); //Pop and Push in one operation
}
else
pushH(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id);
}
__syncthreads();
//Second Step: the first warp of the CUDA Block reduces the elements of the heaps (stored in device memory) to TAM_WARP heaps stored in shared memory
if (threadIdx.x < TAM_WARP)
{
for(j=id; j < blockDim.x*(blockIdx.x+1); j += TAM_WARP)
{
n_elem = TOPK;
for(i=0; i < TOPK; i++)
{
popH(heap, &n_elem, pitch_H, j, &eresult);//Getting an element from a heap in device memory
//Adding the element to the heap in shared memory (if it corresponds)
if (n_elemWarp < TOPK)
pushH(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x);
else
if (topH(&(matrizWarp[0][0]), threadIdx.x) > eresult.dist)
popushH(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x);
}
}
}
__syncthreads();
//Third Step: The first thread of the CUDA Block reduces the elements to one heap (stored in shared memory). The elements of this heap are the final results.
if (threadIdx.x == 0)
{
n_elem = 0;
for(j=0; j < TAM_WARP; j++)
{
for(i=0; i < TOPK; i++)
if (n_elem < TOPK)
pushH((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0);
else
if (topH((Elem *)heapfin, 0) > matrizWarp[i][j].dist)
popushH((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0);
}
//Writing the results
for (i=TOPK*blockIdx.x; i<TOPK*(blockIdx.x+1); i++)
popH(&(heapfin[0][0]), &n_elem, sizeof(Elem), 0, &(res_final[i]));
}
return;
}
*/
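//Host-side helpers: copiavalor_* copies one DIM-dimensional vector into column j of the (dimension x element) matrix a; leedato_* reads DIM doubles from a text file and returns ERROR on a short read. The _DB and _QUERIES variants are identical.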
void copiavalor_DB(double **a, double *b, int j) {
int i;
for (i = 0; i < DIM; i++)
a[i][j] = b[i];
return;
}
int leedato_DB(double *dato, FILE *file) {
int i = 0;
for (i = 0; i < DIM; i++)
if (fscanf(file, "%lf", &dato[i]) < 1)
return ERROR;
return 1;
}
void copiavalor_QUERIES(double **a, double *b, int j) {
int i;
for (i = 0; i < DIM; i++)
a[i][j] = b[i];
return;
}
int leedato_QUERIES(double *dato, FILE *file) {
int i = 0;
for (i = 0; i < DIM; i++)
if (fscanf(file, "%lf", &dato[i]) < 1)
return ERROR;
return 1;
}
| 275edd6326209a2bbe6adc76da70f904de022c89.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <sys/resource.h>
#include <time.h>
#include <sys/time.h>
#define ERROR -1
#define LENGTH_ARRAY 100000
//#define TOPK 32
#define TAM_WARP 32
#define N_BLOQUES 3
#define T_per_BLOCK 512
// Global variables
int DIM,N_QUERIES,N_DB,TOPK;
FILE *Salida_Multihilo;
struct _Elem{
float dist;
int ind;
};
typedef struct _Elem Elem;
void copiavalor_DB(double **a, double *b, int j);
int leedato_DB(double *dato, FILE *file);
void copiavalor_QUERIES(double **a, double *b, int j);
int leedato_QUERIES(double *dato, FILE *file);
__device__ void pushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id);
__device__ void popH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult);
__device__ float topH(Elem *heap, int id);
__device__ void popushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id);
__global__ void Batch_Heap_Reduction(Elem *heap, int pitch_H, Elem *arr_Dist, int pitch_Dist, Elem *res_final);
__global__ void distancias(double *CudaConsultas, size_t pitch_Consultas, double *CudaDB, size_t pitch_DB, double *arr_Dist, int pitch_Dist){
int col, row;
double d = 0, resultado=0;
//Shared memory, max 49 KB
__shared__ double query[N_QUERIES];
if (threadIdx.x < N_QUERIES)
query[threadIdx.x] = ((double *)((char *)CudaConsultas + threadIdx.x*(int)pitch_Consultas))[blockIdx.x];
__syncthreads();
for (col=threadIdx.x; col < N_QUERIES; col += blockDim.x){
d=0;
for (row=0; row < N_DB; row++){
resultado = query[row] - ((double *)((char *)CudaDB + row*(int)pitch_DB))[col];
d += (resultado * resultado);
}
((double *)((char *)arr_Dist + row*(int)pitch_Dist))[col] = sqrt(d);
}
}
int main(int argc, char *argv[]){
int i, j;
char *ruta_db, *ruta_queries;
double **DB, **Consultas,**DB_in, **Consultas_in,*CudaDB, *CudaConsultas;
size_t pitch_DB, pitch_Consultas;
if (argc != 7){
printf("Error :: Ejecutar como : main.out archivo_BD Num_elem archivo_queries Num_queries dim TOPK\n");
return 1;
}
ruta_db = (char *)malloc(sizeof(char)*(strlen(argv[1])+1));
strcpy(ruta_db, argv[1]);
N_DB = atoi(argv[2]);
TOPK = atoi(argv[6]);
printf("%s\n",ruta_db );
ruta_queries = (char *)malloc(sizeof(char)*(strlen(argv[3])+1));
strcpy(ruta_queries, argv[3]);
N_QUERIES = atoi(argv[4]);
//printf("N_QUERIES:: -> :: %d\n",N_QUERIES );
DIM = atoi(argv[5]);
//printf("dim:: %d\n",DIM );
Elem *res_final, *res_final_H;
Elem *HEAPS_dev, *arr_Dist, **arr_Dist_H;
size_t pitch_H, pitch_Dist;
//Variable for time:
struct rusage r1, r2;
float user_time, sys_time, real_time;
struct timeval t1, t2;
FILE *f_dist, *fquery;
double datos_DB[DIM],datos_QUERIES[DIM];
if (cudaSuccess != cudaMallocPitch((void **)&CudaDB, &pitch_DB, N_DB * sizeof(double), DIM)){
printf("\nERROR :: cudaMallocPitch :: CudaDB\n");
cudaThreadExit();
return 0;
}
if (cudaSuccess != cudaMallocPitch((void **)&CudaConsultas, &pitch_Consultas, N_QUERIES * sizeof(double), DIM)){
printf("\nERROR :: cudaMallocPitch :: CudaConsultas\n");
cudaThreadExit();
return 0;
}
printf("\nAbriendo %s... ", argv[1]);
fflush(stdout);
f_dist = fopen(ruta_db, "r");
printf("OK\n");
fflush(stdout);
// Consultas = (double **) malloc(sizeof (double *)*N_QUERIES);
// for (i = 0; i < N_QUERIES; i++)
// Consultas[i] = (double *) malloc(sizeof (double)*DIM);
// DB = (double **) malloc(sizeof (double *)*N_DB);
// for (i = 0; i < N_DB; i++)
// DB[i] = (double *) malloc(sizeof (double)*DIM);
//DIM:: rows
//N_QUERIES and N_DB:: columns
//MEMORY FOR THE MATRICES
Consultas_in = (double **) malloc(sizeof (double *)*DIM);
for (i = 0; i < DIM; i++)
Consultas_in[i] = (double *) malloc(sizeof (double)*N_QUERIES);
DB_in = (double **) malloc(sizeof (double *)*DIM);
for (i = 0; i < DIM; i++)
DB_in[i] = (double *) malloc(sizeof (double)*N_DB);
//END OF MEMORY FOR THE MATRICES
//READING THE DB
for (i = 0; i < N_DB; i++) {
if (leedato_DB(datos_DB, f_dist) == ERROR || feof(f_dist)) {
printf("\n\nERROR :: N_DB mal establecido, Menos que las indicadas\n\n");
fflush(stdout);
fclose(f_dist);
break;
}
copiavalor_DB(DB_in, datos_DB,i);
}
fclose(f_dist);
printf("OK\n");
fflush(stdout);
//END OF DB READING
//READING THE QUERIES
if ((fquery = fopen(ruta_queries, "r")) == NULL)
printf("Error al abrir para lectura el archivo de queries: %s\n", ruta_queries);
else
printf("Abriendo para lectura %s\n", ruta_queries);
printf("\nCargando Consultas... ");
fflush(stdout);
for (i = 0; i < N_QUERIES; i++) {
if (leedato_QUERIES(datos_QUERIES, fquery) == ERROR || feof(fquery)) {
printf("\n\nERROR :: N_QUERIES mal establecido, Menos queries que las indicadas\n\n");
fflush(stdout);
fclose(fquery);
break;
}
copiavalor_QUERIES(Consultas_in, datos_QUERIES,i);
}
fclose(fquery);
printf("OK\n");
fflush(stdout);
//END OF QUERY READING
printf("\nLength of the arrays = %d" , LENGTH_ARRAY);
printf("\nTOPK = %d" , TOPK);
fflush(stdout);
printf("\nANTES\n\n");
for( i = 0; i < 10; i++ ){
for( j = 0; j < 10; j++ ){
printf("%lf ", Consultas_in[i][j]);
}
printf("\n");
}
cudaMemcpy2D(CudaDB, pitch_DB, *DB_in, sizeof(double)*N_DB, sizeof(double)*N_DB, DIM, cudaMemcpyHostToDevice);
cudaMemcpy2D(CudaConsultas, pitch_Consultas, *Consultas_in, sizeof(double)*N_QUERIES, sizeof(double)*N_QUERIES, DIM, cudaMemcpyHostToDevice);
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
distancias<<<N_QUERIES, N_DB>>>(CudaConsultas, pitch_Consultas, CudaDB, pitch_DB, arr_Dist, pitch_Dist);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaMemcpy2D(*Consultas_in, sizeof(double)*N_QUERIES, CudaConsultas, pitch_Consultas, sizeof(double)*N_QUERIES, DIM, cudaMemcpyDeviceToHost);
cudaMemcpy2D(*DB_in, sizeof(double)*N_DB, CudaDB, pitch_DB, sizeof(double)*N_DB, DIM, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("----------------------> Prueba <-----------------------------\n");
printf("\n");
printf("\n");
printf("\n");
printf("\nDESPUES\n\n");
for( i = 0; i < 10; i++ ){
for( j = 0; j < 10; j++ ){
printf("%lf ", Consultas_in[i][j]);
}
printf("\n");
}
/*
for( i = 0; i < DIM; i++ ){
for( j = 0; j < N_QUERIES; j++ ){
printf("%lf -", Consultas_in[i][j]);
}
printf("\n");
}
printf("----------------------> Fin Prueba <-----------------------------\n");
if (cudaSuccess != cudaMalloc((void **)&res_final, sizeof(Elem)*TOPK*N_BLOQUES)){
printf("\nERROR 1 :: cudaMalloc\n");
cudaThreadExit();
return 0;
}
res_final_H = (Elem *)malloc(sizeof(Elem)*TOPK*N_BLOQUES);
for (i=0; i<TOPK*N_BLOQUES; i++){
res_final_H[i].ind = 0;
res_final_H[i].dist = 0;
}
if (cudaSuccess != cudaMemset(res_final, 0, sizeof(Elem)*TOPK*N_BLOQUES)){
printf("\nERROR 2 :: cudaMemset\n");
cudaThreadExit();
return 0;
}
if (cudaSuccess != cudaMallocPitch((void **)&HEAPS_dev, &pitch_H, sizeof(Elem)*N_BLOQUES*T_per_BLOCK, TOPK)){
printf("\nERROR 3 :: cudaMallocPitch :: Heaps_dev col=%lld :: row=%d\n", (long long)(sizeof(Elem)*N_BLOQUES*T_per_BLOCK), TOPK);
cudaThreadExit();
return 0;
}*/
/*arr_Dist_H = (Elem **)malloc(sizeof(Elem *)*N_BLOQUES);
for (i=0; i<N_BLOQUES; i++)
arr_Dist_H[i] = (Elem *)malloc(sizeof(Elem)*LENGTH_ARRAY);*/
/*
for (i=0; i<N_BLOQUES; i++)
for (j=0; j<LENGTH_ARRAY; j++){
arr_Dist_H[i][j].ind = (LENGTH_ARRAY*i) + j; //Setting an ID
arr_Dist_H[i][j].dist = (float)(0.1*(float)((LENGTH_ARRAY * i) + j));
}
if (cudaSuccess != cudaMallocPitch((void **)&arr_Dist, &pitch_Dist, LENGTH_ARRAY*sizeof(Elem), N_BLOQUES)){
printf("\nERROR 4 :: cudaMallocPitch\n");
cudaThreadExit();
return 0;
}
for (i=0; i < N_BLOQUES; i++)
if (cudaSuccess != cudaMemcpy((char *)arr_Dist + (i*(int)pitch_Dist), (Elem *)(arr_Dist_H[i]), sizeof(Elem)*LENGTH_ARRAY, cudaMemcpyHostToDevice)){
printf("\nERROR 5 :: cudaMemcpy\n");
cudaThreadExit();
return 0;
}
cudaThreadSynchronize();
cudaDeviceSynchronize();
getrusage(RUSAGE_SELF, &r1);
gettimeofday(&t1, 0);
printf("\nN_BLOQUES = %d :: T_per_BLOCK = %d\n", N_BLOQUES, T_per_BLOCK);
fflush(stdout);
Batch_Heap_Reduction<<< N_BLOQUES, T_per_BLOCK>>> (HEAPS_dev, (int)pitch_H, arr_Dist, (int)pitch_Dist, res_final);
if (cudaSuccess != cudaMemcpy((Elem *)res_final_H, (Elem *)res_final, sizeof(Elem)*TOPK*N_BLOQUES, cudaMemcpyDeviceToHost)){
printf("\nERROR 41 :: cudaMemcpy :: iteraH\n");
cudaThreadExit();
return 0;
}
cudaThreadSynchronize();
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
getrusage(RUSAGE_SELF, &r2);
user_time = (r2.ru_utime.tv_sec - r1.ru_utime.tv_sec) + (r2.ru_utime.tv_usec - r1.ru_utime.tv_usec)/1000000.0;
sys_time = (r2.ru_stime.tv_sec - r1.ru_stime.tv_sec) + (r2.ru_stime.tv_usec - r1.ru_stime.tv_usec)/1000000.0;
real_time = (t2.tv_sec - t1.tv_sec) + (float)(t2.tv_usec - t1.tv_usec)/1000000;
printf("\nK = %d", TOPK);
printf("\nTiempo CPU = %f", user_time + sys_time);
printf("\nTiempo Real = %f", real_time);
fflush(stdout);
for (i=0; i<N_BLOQUES; i++){
printf("\n\nResults array %d (smallest distances):", i);
for (j=TOPK*i; j<(TOPK*i)+TOPK; j++)
printf("\nind = %d :: dist = %f", res_final_H[j].ind, res_final_H[j].dist);
}
printf("\n");
cudaFree(HEAPS_dev);
cudaFree(arr_Dist);
cudaThreadExit();
*/
return 0;
}
/*
//Push an element 'elem' to the id-th heap stored in the id-th column of the matrix 'heap'
__device__ void pushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id)
{
int i;
Elem temp;
((Elem *)((char *)heap + (*n_elem)*pitch))[id].dist = elem->dist;
((Elem *)((char *)heap + (*n_elem)*pitch))[id].ind = elem->ind;
(*n_elem)++;
for (i = *n_elem; i>1 && ((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist; i=i/2)
{
//Swap with the father
temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist;
temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind;
((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist;
((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind;
((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist = temp.dist;
((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind = temp.ind;
}
return;
}
//Pop an element from id-th heap stored in the id-th column of the matrix 'heap' and stores it in 'eresult'
__device__ void popH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult)
{
int i, k;
Elem temp;
eresult->dist = ((Elem *)((char *)heap+0))[id].dist;
eresult->ind = ((Elem *)((char *)heap+0))[id].ind;
((Elem *)((char *)heap+0))[id].dist = ((Elem *)((char *)heap + ((*n_elem)-1)*pitch))[id].dist;//Moving the last element to the root
((Elem *)((char *)heap+0))[id].ind = ((Elem *)((char *)heap + ((*n_elem)-1)*pitch))[id].ind;
(*n_elem)--;
i = 1;
while(2*i <= *n_elem) //while exists some child
{
k = 2*i; //left child
if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
k = k+1; //right child is the biggest
if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
break; //bigger than both children
temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist;
temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind;
((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist;
((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind;
((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist;
((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind;
i = k; //swap with the biggest child
}
return;
}
//Returns the root of the id-th heap (stored in the id-th column)
__device__ float topH(Elem *heap, int id) //NOTE: Be careful if the heap is empty and topH is called, it will give an error
{
return ((Elem *)((char *)heap + 0))[id].dist;
}
//Pop and push in one operation
__device__ void popushH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id)
{
int i, k;
Elem temp;
((Elem *)((char *)heap+0))[id].dist = elem->dist;
((Elem *)((char *)heap+0))[id].ind = elem->ind;
i = 1;
while(2*i <= *n_elem) //while exists some child
{
k = 2*i; //left child
if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
k = k+1; //right child is the biggest
if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist)
break; //bigger than both children
temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist;
temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind;
((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist;
((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind;
((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist;
((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind;
i = k; //swap with the bigger child
}
return;
}
__global__ void Batch_Heap_Reduction(Elem *heap, int pitch_H, Elem *arr_Dist, int pitch_Dist, Elem *res_final)
{
int i, j, n_elem=0, n_elemWarp=0;
int id;
Elem eresult;
__shared__ Elem matrizWarp[TOPK][TAM_WARP];
__shared__ Elem heapfin[TOPK][1];
id = threadIdx.x + (blockDim.x * blockIdx.x);
//First Step: The array to be sorted (arr_Dist) is reduced to T_per_BLOCK heaps stored in device memory
//The elements of arr_Dist are distributed in a circular manner, therefore consecutive threads access consecutive elements (consecutive memory addresses)
for(i=threadIdx.x; i < LENGTH_ARRAY; i += blockDim.x)
{
if (n_elem >= TOPK)//If the current number of elements in the heap is >= TOPK (it is never actually greater than TOPK; at most it equals TOPK)
{
//The next if is to add an element to the heap just if that element is less than the head of the heap
if (topH(heap, id) > ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].dist)
popushH(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id); //Pop and Push in one operation
}
else
pushH(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id);
}
__syncthreads();
//Second Step: the first warp of the CUDA Block reduces the elements of the heaps (stored in device memory) to TAM_WARP heaps stored in shared memory
if (threadIdx.x < TAM_WARP)
{
for(j=id; j < blockDim.x*(blockIdx.x+1); j += TAM_WARP)
{
n_elem = TOPK;
for(i=0; i < TOPK; i++)
{
popH(heap, &n_elem, pitch_H, j, &eresult);//Getting an element from a heap in device memory
//Adding the element to the heap in shared memory (if it corresponds)
if (n_elemWarp < TOPK)
pushH(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x);
else
if (topH(&(matrizWarp[0][0]), threadIdx.x) > eresult.dist)
popushH(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x);
}
}
}
__syncthreads();
//Third Step: The first thread of the CUDA Block reduces the elements to one heap (stored in shared memory). The elements of this heap are the final results.
if (threadIdx.x == 0)
{
n_elem = 0;
for(j=0; j < TAM_WARP; j++)
{
for(i=0; i < TOPK; i++)
if (n_elem < TOPK)
pushH((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0);
else
if (topH((Elem *)heapfin, 0) > matrizWarp[i][j].dist)
popushH((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0);
}
//Writing the results
for (i=TOPK*blockIdx.x; i<TOPK*(blockIdx.x+1); i++)
popH(&(heapfin[0][0]), &n_elem, sizeof(Elem), 0, &(res_final[i]));
}
return;
}
*/
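//Host-side helpers: copiavalor_* copies one DIM-dimensional vector into column j of the (dimension x element) matrix a; leedato_* reads DIM doubles from a text file and returns ERROR on a short read. The _DB and _QUERIES variants are identical.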
void copiavalor_DB(double **a, double *b, int j) {
int i;
for (i = 0; i < DIM; i++)
a[i][j] = b[i];
return;
}
int leedato_DB(double *dato, FILE *file) {
int i = 0;
for (i = 0; i < DIM; i++)
if (fscanf(file, "%lf", &dato[i]) < 1)
return ERROR;
return 1;
}
void copiavalor_QUERIES(double **a, double *b, int j) {
int i;
for (i = 0; i < DIM; i++)
a[i][j] = b[i];
return;
}
int leedato_QUERIES(double *dato, FILE *file) {
int i = 0;
for (i = 0; i < DIM; i++)
if (fscanf(file, "%lf", &dato[i]) < 1)
return ERROR;
return 1;
}
|
69d2adafc889723d316098bf0e475d2322f2c4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ double complexMagnitude(double2 in){
return sqrt(in.x*in.x + in.y*in.y);
}
__global__ void complexMag_test(double2 *in, double *out){
out[0] = complexMagnitude(in[0]);
} | 69d2adafc889723d316098bf0e475d2322f2c4d5.cu | #include "includes.h"
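// Returns the magnitude |z| = sqrt(x*x + y*y) of a complex number stored as a double2 (x = real part, y = imaginary part).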
__device__ double complexMagnitude(double2 in){
return sqrt(in.x*in.x + in.y*in.y);
}
__global__ void complexMag_test(double2 *in, double *out){
out[0] = complexMagnitude(in[0]);
} |
366f96f377079499b72273869bb7d83d50386e05.hip | // !!! This is a file automatically generated by hipify!!!
//C++
#include <time.h>
#include <iostream>
using namespace std;
//openCV
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
//CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//timer
#include "timer.hpp"
__global__ void convolution_kernel(unsigned char* in_image,short *out_image,int *H,int width,int height)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if(x<0 || x>=width || y<0 || y>=height)
{
return;
}
int pos = y*width+x;
if(x==0||y==0||(x==width-1)||(y==height-1))
{
out_image[pos] = in_image[pos];
return;
}
int left = pos - 1;
int right = pos + 1;
int up = pos - width;
int down = pos + width;
int up_left = up - 1;
int up_right = up + 1;
int down_left = down - 1;
int down_right = down + 1;
out_image[pos] = H[0]*in_image[up_left] + H[1]*in_image[up] + H[2]*in_image[up_right]
+H[3]*in_image[left] + H[4]*in_image[pos] + H[5]*in_image[right]
+H[6]*in_image[down_left] + H[7]*in_image[down] + H[8]*in_image[down_right];
}
int main()
{
//in data
Mat in_image = imread("test.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat out_image = Mat(in_image.size(),CV_16S);
//convolution kernel
int H[9];
H[0]=-1;H[1]=-1;H[2]=-1;
H[3]=-1;H[4]= 8;H[5]=-1;
H[6]=-1;H[7]=-1;H[8]=-1;
//calc
Timer start_time;
//init CUDA
//error status
hipError_t cuda_status;
//only chose one GPU
//init
cuda_status = hipSetDevice(0);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipSetDevice failed! Do you have a CUDA-Capable GPU installed?");
return -1;
}
//in image and out image
unsigned char * dev_in_image;
short * dev_out_image;
int *dev_H;
//size of image
int image_size = in_image.cols*in_image.rows;
//allocate memory on the GPU
cuda_status = hipMalloc((void**)&dev_in_image,sizeof(unsigned char)*image_size);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMalloc Failed");
exit( EXIT_FAILURE );
}
cuda_status = hipMalloc((void**)&dev_out_image,sizeof(short)*image_size);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMalloc Failed");
exit( EXIT_FAILURE );
}
cuda_status = hipMalloc((void**)&dev_H,sizeof(int)*9);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMalloc Failed");
exit( EXIT_FAILURE );
}
//copy
cuda_status = hipMemcpy(dev_in_image,in_image.data,sizeof(unsigned char)*image_size,hipMemcpyHostToDevice);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMemcpy Failed");
exit( EXIT_FAILURE );
}
hipMemset(dev_out_image,0,sizeof(short)*image_size);
cuda_status = hipMemcpy(dev_H,H,sizeof(int)*9,hipMemcpyHostToDevice);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMemcpy Failed");
exit( EXIT_FAILURE );
}
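// One 16x16 thread block per 16x16 tile of the image; the grid is rounded up so every pixel is covered.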
dim3 threads(16,16);
dim3 grid(max((in_image.cols+threads.x-1)/threads.x,1),max((in_image.rows+threads.y-1)/threads.y,1));
hipLaunchKernelGGL(( convolution_kernel), dim3(grid),dim3(threads), 0, 0, dev_in_image,dev_out_image,dev_H,in_image.cols,in_image.rows);
//copy out
cuda_status = hipMemcpy((short*)out_image.data,dev_out_image,sizeof(short)*image_size,hipMemcpyDeviceToHost);
if(cuda_status != hipSuccess)
{
fprintf(stderr,"hipMemcpy Failed");
exit( EXIT_FAILURE );
}
hipFree(dev_in_image);
hipFree(dev_out_image);
hipFree(dev_H);
cout<<start_time.elapsedMs()<<endl;
//output
Mat abs_dst;
convertScaleAbs( out_image, abs_dst );
imwrite("cuda.jpg",abs_dst);
return 0;
} | 366f96f377079499b72273869bb7d83d50386e05.cu | //C++
#include <time.h>
#include <iostream>
using namespace std;
//openCV
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
//CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//timer
#include "timer.hpp"
__global__ void convolution_kernel(unsigned char* in_image,short *out_image,int *H,int width,int height)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if(x<0 || x>=width || y<0 || y>=height)
{
return;
}
int pos = y*width+x;
if(x==0||y==0||(x==width-1)||(y==height-1))
{
out_image[pos] = in_image[pos];
return;
}
int left = pos - 1;
int right = pos + 1;
int up = pos - width;
int down = pos + width;
int up_left = up - 1;
int up_right = up + 1;
int down_left = down - 1;
int down_right = down + 1;
out_image[pos] = H[0]*in_image[up_left] + H[1]*in_image[up] + H[2]*in_image[up_right]
+H[3]*in_image[left] + H[4]*in_image[pos] + H[5]*in_image[right]
+H[6]*in_image[down_left] + H[7]*in_image[down] + H[8]*in_image[down_right];
}
int main()
{
//in data
Mat in_image = imread("test.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat out_image = Mat(in_image.size(),CV_16S);
//convolution kernel
int H[9];
H[0]=-1;H[1]=-1;H[2]=-1;
H[3]=-1;H[4]= 8;H[5]=-1;
H[6]=-1;H[7]=-1;H[8]=-1;
//calc
Timer start_time;
//init CUDA
//error status
cudaError_t cuda_status;
//only chose one GPU
//init
cuda_status = cudaSetDevice(0);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaSetDevice failed! Do you have a CUDA-Capable GPU installed?");
return -1;
}
//in image and out image
unsigned char * dev_in_image;
short * dev_out_image;
int *dev_H;
//size of image
int image_size = in_image.cols*in_image.rows;
//allocate memory on the GPU
cuda_status = cudaMalloc((void**)&dev_in_image,sizeof(unsigned char)*image_size);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMalloc Failed");
exit( EXIT_FAILURE );
}
cuda_status = cudaMalloc((void**)&dev_out_image,sizeof(short)*image_size);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMalloc Failed");
exit( EXIT_FAILURE );
}
cuda_status = cudaMalloc((void**)&dev_H,sizeof(int)*9);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMalloc Failed");
exit( EXIT_FAILURE );
}
//copy
cuda_status = cudaMemcpy(dev_in_image,in_image.data,sizeof(unsigned char)*image_size,cudaMemcpyHostToDevice);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMemcpy Failed");
exit( EXIT_FAILURE );
}
cudaMemset(dev_out_image,0,sizeof(short)*image_size);
cuda_status = cudaMemcpy(dev_H,H,sizeof(int)*9,cudaMemcpyHostToDevice);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMemcpy Failed");
exit( EXIT_FAILURE );
}
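// One 16x16 thread block per 16x16 tile of the image; the grid is rounded up so every pixel is covered.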
dim3 threads(16,16);
dim3 grid(max((in_image.cols+threads.x-1)/threads.x,1),max((in_image.rows+threads.y-1)/threads.y,1));
convolution_kernel<<<grid,threads>>>(dev_in_image,dev_out_image,dev_H,in_image.cols,in_image.rows);
//copy out
cuda_status = cudaMemcpy((short*)out_image.data,dev_out_image,sizeof(short)*image_size,cudaMemcpyDeviceToHost);
if(cuda_status != cudaSuccess)
{
fprintf(stderr,"cudaMemcpy Failed");
exit( EXIT_FAILURE );
}
cudaFree(dev_in_image);
cudaFree(dev_out_image);
cudaFree(dev_H);
cout<<start_time.elapsedMs()<<endl;
//output
Mat abs_dst;
convertScaleAbs( out_image, abs_dst );
imwrite("cuda.jpg",abs_dst);
return 0;
} |
5dca36de20ed47fcb27b10f01eab091b9b909223.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* wcsphStandard.cu
*
* Author: Kamil Szewc ([email protected])
* Modified on: 27-09-2014
*
*/
#include <thrust/device_vector.h>
#include "../sph.h"
#include "../errlog.h"
#include "wcsphStandard.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/smoothingDensity/smoothingDensity.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
void modelWcsphStandard(int NOB, int TPB,
thrust::device_vector<Particle>& pVector,
Particle *pSort,
ParticleBasic *pOld,
uint *gridParticleHash,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par,
Parameters *parHost,
real time)
{
STARTLOG("logs/models.log");
Particle* p = thrust::raw_pointer_cast(pVector.data());
calcTimeStep(pVector, par, parHost);
hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par);
static int step = 1;
if ((parHost->T_SMOOTHING_DENSITY != 0) && (step%parHost->T_SMOOTHING_DENSITY == 0))
{
smoothingDensity << <NOB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("smoothingDensity");
}
step++;
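// Core WCSPH step: equation of state (pressure), particle-particle interactions, optional XSPH velocity correction, then advection; the sorted particles are copied back to the main particle array afterwards.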
hipLaunchKernelGGL(( calcPressureWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureWS");
hipLaunchKernelGGL(( calcInteractionWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionWS");
if (parHost->T_SURFACE_TENSION != 0) {
// No surface tension model
}
if (parHost->T_XSPH != 0) {
hipLaunchKernelGGL(( calcXsphWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcXsphWS");
}
hipLaunchKernelGGL(( calcAdvectionWS) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionWS");
copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par);
}
| 5dca36de20ed47fcb27b10f01eab091b9b909223.cu | /*
* wcsphStandard.cu
*
* Author: Kamil Szewc ([email protected])
* Modified on: 27-09-2014
*
*/
#include <thrust/device_vector.h>
#include "../sph.h"
#include "../errlog.h"
#include "wcsphStandard.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/smoothingDensity/smoothingDensity.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
void modelWcsphStandard(int NOB, int TPB,
thrust::device_vector<Particle>& pVector,
Particle *pSort,
ParticleBasic *pOld,
uint *gridParticleHash,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par,
Parameters *parHost,
real time)
{
STARTLOG("logs/models.log");
Particle* p = thrust::raw_pointer_cast(pVector.data());
calcTimeStep(pVector, par, parHost);
hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par);
static int step = 1;
if ((parHost->T_SMOOTHING_DENSITY != 0) && (step%parHost->T_SMOOTHING_DENSITY == 0))
{
smoothingDensity << <NOB, TPB >> >(pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("smoothingDensity");
}
step++;
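// Core WCSPH step: equation of state (pressure), particle-particle interactions, optional XSPH velocity correction, then advection; the sorted particles are copied back to the main particle array afterwards.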
calcPressureWS <<<NOB, TPB>>>(pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureWS");
calcInteractionWS <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionWS");
if (parHost->T_SURFACE_TENSION != 0) {
// No surface tension model
}
if (parHost->T_XSPH != 0) {
calcXsphWS <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcXsphWS");
}
calcAdvectionWS <<<NOB, TPB>>>(pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionWS");
copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par);
}
|
2393446bdf168800fa0050b7eba1132f40f07443.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _KMEANS_CUDA_KERNEL_H_
#define _KMEANS_CUDA_KERNEL_H_
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "kmeans.h"
// FIXME: Make this a runtime selectable variable!
#define ASSUMED_NR_CLUSTERS 5
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]...
texture<float, 1, hipReadModeElementType> t_features;
// t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1]
texture<float, 1, hipReadModeElementType> t_features_flipped;
texture<float, 1, hipReadModeElementType> t_clusters;
__constant__ float c_clusters[ASSUMED_NR_CLUSTERS*34]; /* constant memory for cluster centers */
/* ----------------- invert_mapping() --------------------- */
/* inverts data array from row-major to column-major.
[p0,dim0][p0,dim1][p0,dim2] ...
[p1,dim0][p1,dim1][p1,dim2] ...
[p2,dim0][p2,dim1][p2,dim2] ...
to
[dim0,p0][dim0,p1][dim0,p2] ...
[dim1,p0][dim1,p1][dim1,p2] ...
[dim2,p0][dim2,p1][dim2,p2] ...
*/
__global__ void invert_mapping(float *input, /* original */
float *output, /* inverted */
int npoints, /* npoints */
int nfeatures) /* nfeatures */
{
int point_id = threadIdx.x + blockDim.x*blockIdx.x; /* id of thread */
int i;
if(point_id < npoints){
for(i=0;i<nfeatures;i++)
output[point_id + npoints*i] = input[point_id*nfeatures + i];
}
return;
}
/* ----------------- invert_mapping() end --------------------- */
/* to turn on the GPU delta and center reduction */
//#define GPU_DELTA_REDUCTION
//#define GPU_NEW_CENTER_REDUCTION
/* ----------------- kmeansPoint() --------------------- */
/* find the index of nearest cluster centers and change membership*/
__global__ void
kmeansPoint(float *features, /* in: [npoints*nfeatures] */
int nfeatures,
int npoints,
int nclusters,
int *membership,
float *clusters,
float *block_clusters,
int *block_deltas)
{
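// Outputs: block_deltas holds, per thread block, the number of points whose cluster membership changed (only with GPU_DELTA_REDUCTION); block_clusters holds per-block partial sums of the feature values of the points assigned to each cluster (only with GPU_NEW_CENTER_REDUCTION).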
// block ID
const unsigned int block_id = gridDim.x*blockIdx.y+blockIdx.x;
// point/thread ID
const unsigned int point_id = block_id*blockDim.x*blockDim.y + threadIdx.x;
int index = -1;
if (point_id < npoints)
{
int i, j;
float min_dist = FLT_MAX;
float dist; /* squared distance between a point and a cluster center */
/* find the cluster center id with min distance to pt */
for (i=0; i<nclusters; i++) {
int cluster_base_index = i*nfeatures; /* base index of cluster centers for inverted array */
float ans=0.0; /* squared Euclidean distance */
for (j=0; j < nfeatures; j++)
{
int addr = point_id + j*npoints; /* appropriate index of data point */
float diff = (tex1Dfetch(t_features,addr) -
c_clusters[cluster_base_index + j]); /* distance between a data point to cluster centers */
ans += diff*diff; /* sum of squares */
}
dist = ans;
/* see if distance is smaller than previous ones:
if so, change minimum distance and save index of cluster center */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
}
#ifdef GPU_DELTA_REDUCTION
// count how many points are now closer to a different cluster center
__shared__ int deltas[THREADS_PER_BLOCK];
if(threadIdx.x < THREADS_PER_BLOCK) {
deltas[threadIdx.x] = 0;
}
#endif
if (point_id < npoints)
{
#ifdef GPU_DELTA_REDUCTION
/* if membership changes, increase delta by 1 */
if (membership[point_id] != index) {
deltas[threadIdx.x] = 1;
}
#endif
/* assign the membership to object point_id */
membership[point_id] = index;
}
#ifdef GPU_DELTA_REDUCTION
// make sure all the deltas have finished writing to shared memory
__syncthreads();
// now let's count them
// primitive reduction follows
unsigned int threadids_participating = THREADS_PER_BLOCK / 2;
for(;threadids_participating > 1; threadids_participating /= 2) {
if(threadIdx.x < threadids_participating) {
deltas[threadIdx.x] += deltas[threadIdx.x + threadids_participating];
}
__syncthreads();
}
if(threadIdx.x < 1) {deltas[threadIdx.x] += deltas[threadIdx.x + 1];}
__syncthreads();
// propagate number of changes to global counter
if(threadIdx.x == 0) {
block_deltas[blockIdx.y * gridDim.x + blockIdx.x] = deltas[0];
//printf("original id: %d, modified: %d\n", blockIdx.y*gridDim.x+blockIdx.x, blockIdx.x);
}
#endif
#ifdef GPU_NEW_CENTER_REDUCTION
int center_id = threadIdx.x / nfeatures;
int dim_id = threadIdx.x - nfeatures*center_id;
__shared__ int new_center_ids[THREADS_PER_BLOCK];
new_center_ids[threadIdx.x] = index;
__syncthreads();
/***
determine which dimension to calculate the sum for
mapping of threads is
center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...
***/
int new_base_index = (point_id - threadIdx.x)*nfeatures + dim_id;
float accumulator = 0.f;
if(threadIdx.x < nfeatures * nclusters) {
// accumulate over all the elements of this threadblock
for(int i = 0; i< (THREADS_PER_BLOCK); i++) {
float val = tex1Dfetch(t_features_flipped,new_base_index+i*nfeatures);
if(new_center_ids[i] == center_id)
accumulator += val;
}
// now store the sum for this threadblock
/***
mapping to global array is
block0[center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...]block1[...]...
***/
block_clusters[(blockIdx.y*gridDim.x + blockIdx.x) * nclusters * nfeatures + threadIdx.x] = accumulator;
}
#endif
}
#endif // #ifndef _KMEANS_CUDA_KERNEL_H_
| 2393446bdf168800fa0050b7eba1132f40f07443.cu | #ifndef _KMEANS_CUDA_KERNEL_H_
#define _KMEANS_CUDA_KERNEL_H_
#include <stdio.h>
#include <cuda.h>
#include "kmeans.h"
// FIXME: Make this a runtime selectable variable!
#define ASSUMED_NR_CLUSTERS 5
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]...
texture<float, 1, cudaReadModeElementType> t_features;
// t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1]
texture<float, 1, cudaReadModeElementType> t_features_flipped;
texture<float, 1, cudaReadModeElementType> t_clusters;
__constant__ float c_clusters[ASSUMED_NR_CLUSTERS*34]; /* constant memory for cluster centers */
/* ----------------- invert_mapping() --------------------- */
/* inverts data array from row-major to column-major.
[p0,dim0][p0,dim1][p0,dim2] ...
[p1,dim0][p1,dim1][p1,dim2] ...
[p2,dim0][p2,dim1][p2,dim2] ...
to
[dim0,p0][dim0,p1][dim0,p2] ...
[dim1,p0][dim1,p1][dim1,p2] ...
[dim2,p0][dim2,p1][dim2,p2] ...
*/
__global__ void invert_mapping(float *input, /* original */
float *output, /* inverted */
int npoints, /* npoints */
int nfeatures) /* nfeatures */
{
int point_id = threadIdx.x + blockDim.x*blockIdx.x; /* id of thread */
int i;
if(point_id < npoints){
for(i=0;i<nfeatures;i++)
output[point_id + npoints*i] = input[point_id*nfeatures + i];
}
return;
}
/* ----------------- invert_mapping() end --------------------- */
/* to turn on the GPU delta and center reduction */
//#define GPU_DELTA_REDUCTION
//#define GPU_NEW_CENTER_REDUCTION
/* ----------------- kmeansPoint() --------------------- */
/* find the index of nearest cluster centers and change membership*/
__global__ void
kmeansPoint(float *features, /* in: [npoints*nfeatures] */
int nfeatures,
int npoints,
int nclusters,
int *membership,
float *clusters,
float *block_clusters,
int *block_deltas)
{
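// Outputs: block_deltas holds, per thread block, the number of points whose cluster membership changed (only with GPU_DELTA_REDUCTION); block_clusters holds per-block partial sums of the feature values of the points assigned to each cluster (only with GPU_NEW_CENTER_REDUCTION).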
// block ID
const unsigned int block_id = gridDim.x*blockIdx.y+blockIdx.x;
// point/thread ID
const unsigned int point_id = block_id*blockDim.x*blockDim.y + threadIdx.x;
int index = -1;
if (point_id < npoints)
{
int i, j;
float min_dist = FLT_MAX;
float dist; /* squared distance between a point and a cluster center */
/* find the cluster center id with min distance to pt */
for (i=0; i<nclusters; i++) {
int cluster_base_index = i*nfeatures; /* base index of cluster centers for inverted array */
float ans=0.0; /* squared Euclidean distance */
for (j=0; j < nfeatures; j++)
{
int addr = point_id + j*npoints; /* appropriate index of data point */
float diff = (tex1Dfetch(t_features,addr) -
c_clusters[cluster_base_index + j]); /* distance between a data point to cluster centers */
ans += diff*diff; /* sum of squares */
}
dist = ans;
/* see if distance is smaller than previous ones:
if so, change minimum distance and save index of cluster center */
if (dist < min_dist) {
min_dist = dist;
index = i;
}
}
}
#ifdef GPU_DELTA_REDUCTION
// count how many points are now closer to a different cluster center
__shared__ int deltas[THREADS_PER_BLOCK];
if(threadIdx.x < THREADS_PER_BLOCK) {
deltas[threadIdx.x] = 0;
}
#endif
if (point_id < npoints)
{
#ifdef GPU_DELTA_REDUCTION
/* if membership changes, increase delta by 1 */
if (membership[point_id] != index) {
deltas[threadIdx.x] = 1;
}
#endif
/* assign the membership to object point_id */
membership[point_id] = index;
}
#ifdef GPU_DELTA_REDUCTION
// make sure all the deltas have finished writing to shared memory
__syncthreads();
// now let's count them
// primitive reduction follows
unsigned int threadids_participating = THREADS_PER_BLOCK / 2;
for(;threadids_participating > 1; threadids_participating /= 2) {
if(threadIdx.x < threadids_participating) {
deltas[threadIdx.x] += deltas[threadIdx.x + threadids_participating];
}
__syncthreads();
}
if(threadIdx.x < 1) {deltas[threadIdx.x] += deltas[threadIdx.x + 1];}
__syncthreads();
// propagate number of changes to global counter
if(threadIdx.x == 0) {
block_deltas[blockIdx.y * gridDim.x + blockIdx.x] = deltas[0];
//printf("original id: %d, modified: %d\n", blockIdx.y*gridDim.x+blockIdx.x, blockIdx.x);
}
#endif
#ifdef GPU_NEW_CENTER_REDUCTION
int center_id = threadIdx.x / nfeatures;
int dim_id = threadIdx.x - nfeatures*center_id;
__shared__ int new_center_ids[THREADS_PER_BLOCK];
new_center_ids[threadIdx.x] = index;
__syncthreads();
/***
determine which dimension to calculate the sum for
mapping of threads is
center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...
***/
int new_base_index = (point_id - threadIdx.x)*nfeatures + dim_id;
float accumulator = 0.f;
if(threadIdx.x < nfeatures * nclusters) {
// accumulate over all the elements of this threadblock
for(int i = 0; i< (THREADS_PER_BLOCK); i++) {
float val = tex1Dfetch(t_features_flipped,new_base_index+i*nfeatures);
if(new_center_ids[i] == center_id)
accumulator += val;
}
// now store the sum for this threadblock
/***
mapping to global array is
block0[center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...]block1[...]...
***/
block_clusters[(blockIdx.y*gridDim.x + blockIdx.x) * nclusters * nfeatures + threadIdx.x] = accumulator;
}
#endif
}
#endif // #ifndef _KMEANS_CUDA_KERNEL_H_
|
4c9b9c8953601511344d20e0fea272bb3d15b3d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <mutex>
#include <type_traits>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <cuda_fp16.hpp>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/backend.h"
#include "chainerx/backend_util.h"
#include "chainerx/cuda/rocblas.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/float16.h"
#include "chainerx/kernels/creation.h"
#include "chainerx/kernels/linalg.h"
#include "chainerx/kernels/math.h"
#include "chainerx/kernels/misc.h"
#include "chainerx/macro.h"
#include "chainerx/routines/creation.h"
#include "chainerx/routines/math.h"
namespace chainerx {
namespace cuda {
namespace {
// Dispatch gemm routines based on the element type T
template <typename T>
struct Gemm;
template <>
struct Gemm<float> {
template <typename... Args>
hipblasStatus_t operator()(Args&&... args) const {
return hipblasSgemm(std::forward<Args>(args)...);
}
};
template <>
struct Gemm<double> {
template <typename... Args>
hipblasStatus_t operator()(Args&&... args) const {
return hipblasDgemm(std::forward<Args>(args)...);
}
};
struct GemmInputLayout {
int64_t ld = 0;
hipblasOperation_t trans = HIPBLAS_OP_T;
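// hipBLAS is column-major: a row-major array already reads as the transposed matrix, so no transpose op is needed (HIPBLAS_OP_N); a column-major array keeps the default HIPBLAS_OP_T so that its transpose is what enters the GEMM call.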
// Configure leading dimension and transposition accordingly, and makes the array C contiguous if necessary.
Array Configure(const Array& a) {
CHAINERX_ASSERT(a.ndim() == 2);
// Row-major
// Note that this condition is slightly relaxed than Array::IsContiguous() which requires
// a.strides()[0] == a.GetItemSize() * a.shape()[1]
if (a.strides()[1] == a.GetItemSize() && a.strides()[0] / a.GetItemSize() >= a.shape()[1] &&
a.strides()[0] % a.GetItemSize() == 0) {
ld = a.strides()[0] / a.GetItemSize();
trans = HIPBLAS_OP_N; // transposed
return a;
}
// Column-major
if (a.strides()[0] == a.GetItemSize() && a.strides()[1] / a.GetItemSize() >= a.shape()[0] &&
a.strides()[1] % a.GetItemSize() == 0) {
ld = a.strides()[1] / a.GetItemSize();
return a;
}
// Force row-major contiguous
ld = a.shape()[1];
trans = HIPBLAS_OP_N; // transposed
return internal::AsContiguous(a);
}
};
} // namespace
class CudaDotKernel : public DotKernel {
public:
void Call(const Array& a, const Array& b, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, b, out);
CudaSetDeviceScope scope{device.index()};
if (GetKind(out.dtype()) != DtypeKind::kFloat) {
throw NotImplementedError("dot is not implemented for non-float types in CUDA");
}
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(b.ndim() == 2);
CHAINERX_ASSERT(out.ndim() == 2);
int64_t m = a.shape()[0];
int64_t k = a.shape()[1];
int64_t n = b.shape()[1];
CHAINERX_ASSERT(b.shape()[0] == k);
CHAINERX_ASSERT(out.shape()[0] == m);
CHAINERX_ASSERT(out.shape()[1] == n);
if (m == 1 && n == 1) {
// TODO(beam2d): Write a custom reduction kernel.
// TODO(hvy): Avoid unnecessary cast here when multiplication supports mixed dtypes.
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
device.backend().CallKernel<SumKernel>(a_cast.Reshape({k}) * b_cast.Reshape({k}), Axes{0}, out.Reshape({}));
return;
}
if (out.dtype() == Dtype::kFloat16) {
// TODO(imanishi): Use hipblasHgemm
Array out_float32 = Empty(out.shape(), Dtype::kFloat32, device);
device.backend().CallKernel<DotKernel>(a.AsType(Dtype::kFloat32), b.AsType(Dtype::kFloat32), out_float32);
device.backend().CallKernel<AsTypeKernel>(out_float32, out);
return;
}
bool is_out_contiguous = out.IsContiguous();
Array out_contiguous = is_out_contiguous ? out : EmptyLike(out, device);
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
auto gemm_impl = [&](auto pt) {
CHAINERX_ASSERT(a_cast.dtype() == out_contiguous.dtype());
CHAINERX_ASSERT(b_cast.dtype() == out_contiguous.dtype());
using T = typename decltype(pt)::type;
using StorageType = cuda_internal::StorageType<T>;
using CudaType = cuda_internal::DataType<T>;
// Note that cuBLAS uses Fortran order.
// To compute out = a x b, we use cuBLAS to compute out^T = b^T x a^T (here x is the matrix product).
GemmInputLayout a_cast_layout;
GemmInputLayout b_cast_layout;
Array a_cast_config = a_cast_layout.Configure(a_cast);
Array b_cast_config = b_cast_layout.Configure(b_cast);
const CudaType one{chainerx::Float16{1}};
const CudaType zero{chainerx::Float16{0}};
const CudaType* a_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(a_cast_config)));
const CudaType* b_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(b_cast_config)));
CudaType* out_ptr =
&cuda_internal::StorageToDataType<T>(*static_cast<StorageType*>(internal::GetRawOffsetData(out_contiguous)));
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
device_internals.cublas_handle().Call(
Gemm<T>{},
b_cast_layout.trans,
a_cast_layout.trans,
n,
m,
k,
&one,
b_cast_ptr,
b_cast_layout.ld,
a_cast_ptr,
a_cast_layout.ld,
&zero,
out_ptr,
n);
};
switch (out.dtype()) {
case Dtype::kFloat32:
gemm_impl(PrimitiveType<float>{});
break;
case Dtype::kFloat64:
gemm_impl(PrimitiveType<double>{});
break;
default:
CHAINERX_NEVER_REACH();
}
if (!is_out_contiguous) {
device.backend().CallKernel<CopyKernel>(out_contiguous, out);
}
}
};
CHAINERX_CUDA_REGISTER_KERNEL(DotKernel, CudaDotKernel);
} // namespace cuda
} // namespace chainerx
| 4c9b9c8953601511344d20e0fea272bb3d15b3d5.cu | #include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <mutex>
#include <type_traits>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cuda_fp16.hpp>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/backend.h"
#include "chainerx/backend_util.h"
#include "chainerx/cuda/cublas.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/float16.h"
#include "chainerx/kernels/creation.h"
#include "chainerx/kernels/linalg.h"
#include "chainerx/kernels/math.h"
#include "chainerx/kernels/misc.h"
#include "chainerx/macro.h"
#include "chainerx/routines/creation.h"
#include "chainerx/routines/math.h"
namespace chainerx {
namespace cuda {
namespace {
// Dispatch gemm routines based on the element type T
template <typename T>
struct Gemm;
template <>
struct Gemm<float> {
template <typename... Args>
cublasStatus_t operator()(Args&&... args) const {
return cublasSgemm(std::forward<Args>(args)...);
}
};
template <>
struct Gemm<double> {
template <typename... Args>
cublasStatus_t operator()(Args&&... args) const {
return cublasDgemm(std::forward<Args>(args)...);
}
};
struct GemmInputLayout {
int64_t ld = 0;
cublasOperation_t trans = CUBLAS_OP_T;
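// cuBLAS is column-major: a row-major array already reads as the transposed matrix, so no transpose op is needed (CUBLAS_OP_N); a column-major array keeps the default CUBLAS_OP_T so that its transpose is what enters the GEMM call.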
// Configure leading dimension and transposition accordingly, and makes the array C contiguous if necessary.
Array Configure(const Array& a) {
CHAINERX_ASSERT(a.ndim() == 2);
// Row-major
// Note that this condition is slightly relaxed than Array::IsContiguous() which requires
// a.strides()[0] == a.GetItemSize() * a.shape()[1]
if (a.strides()[1] == a.GetItemSize() && a.strides()[0] / a.GetItemSize() >= a.shape()[1] &&
a.strides()[0] % a.GetItemSize() == 0) {
ld = a.strides()[0] / a.GetItemSize();
trans = CUBLAS_OP_N; // transposed
return a;
}
// Column-major
if (a.strides()[0] == a.GetItemSize() && a.strides()[1] / a.GetItemSize() >= a.shape()[0] &&
a.strides()[1] % a.GetItemSize() == 0) {
ld = a.strides()[1] / a.GetItemSize();
return a;
}
// Force row-major contiguous
ld = a.shape()[1];
trans = CUBLAS_OP_N; // transposed
return internal::AsContiguous(a);
}
};
} // namespace
class CudaDotKernel : public DotKernel {
public:
void Call(const Array& a, const Array& b, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, b, out);
CudaSetDeviceScope scope{device.index()};
if (GetKind(out.dtype()) != DtypeKind::kFloat) {
throw NotImplementedError("dot is not implemented for non-float types in CUDA");
}
CHAINERX_ASSERT(a.ndim() == 2);
CHAINERX_ASSERT(b.ndim() == 2);
CHAINERX_ASSERT(out.ndim() == 2);
int64_t m = a.shape()[0];
int64_t k = a.shape()[1];
int64_t n = b.shape()[1];
CHAINERX_ASSERT(b.shape()[0] == k);
CHAINERX_ASSERT(out.shape()[0] == m);
CHAINERX_ASSERT(out.shape()[1] == n);
if (m == 1 && n == 1) {
// TODO(beam2d): Write a custom reduction kernel.
// TODO(hvy): Avoid unnecessary cast here when multiplication supports mixed dtypes.
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
device.backend().CallKernel<SumKernel>(a_cast.Reshape({k}) * b_cast.Reshape({k}), Axes{0}, out.Reshape({}));
return;
}
if (out.dtype() == Dtype::kFloat16) {
// TODO(imanishi): Use cublasHgemm
Array out_float32 = Empty(out.shape(), Dtype::kFloat32, device);
device.backend().CallKernel<DotKernel>(a.AsType(Dtype::kFloat32), b.AsType(Dtype::kFloat32), out_float32);
device.backend().CallKernel<AsTypeKernel>(out_float32, out);
return;
}
bool is_out_contiguous = out.IsContiguous();
Array out_contiguous = is_out_contiguous ? out : EmptyLike(out, device);
const Array& a_cast = a.dtype() == out.dtype() ? a : a.AsType(out.dtype());
const Array& b_cast = b.dtype() == out.dtype() ? b : b.AsType(out.dtype());
auto gemm_impl = [&](auto pt) {
CHAINERX_ASSERT(a_cast.dtype() == out_contiguous.dtype());
CHAINERX_ASSERT(b_cast.dtype() == out_contiguous.dtype());
using T = typename decltype(pt)::type;
using StorageType = cuda_internal::StorageType<T>;
using CudaType = cuda_internal::DataType<T>;
// Note that cuBLAS uses Fortran order.
// To compute out = a x b, we use cuBLAS to compute out^T = b^T x a^T (here x is the matrix product).
GemmInputLayout a_cast_layout;
GemmInputLayout b_cast_layout;
Array a_cast_config = a_cast_layout.Configure(a_cast);
Array b_cast_config = b_cast_layout.Configure(b_cast);
const CudaType one{chainerx::Float16{1}};
const CudaType zero{chainerx::Float16{0}};
const CudaType* a_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(a_cast_config)));
const CudaType* b_cast_ptr =
&cuda_internal::StorageToDataType<const T>(*static_cast<const StorageType*>(internal::GetRawOffsetData(b_cast_config)));
CudaType* out_ptr =
&cuda_internal::StorageToDataType<T>(*static_cast<StorageType*>(internal::GetRawOffsetData(out_contiguous)));
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(static_cast<CudaDevice&>(device));
device_internals.cublas_handle().Call(
Gemm<T>{},
b_cast_layout.trans,
a_cast_layout.trans,
n,
m,
k,
&one,
b_cast_ptr,
b_cast_layout.ld,
a_cast_ptr,
a_cast_layout.ld,
&zero,
out_ptr,
n);
};
switch (out.dtype()) {
case Dtype::kFloat32:
gemm_impl(PrimitiveType<float>{});
break;
case Dtype::kFloat64:
gemm_impl(PrimitiveType<double>{});
break;
default:
CHAINERX_NEVER_REACH();
}
if (!is_out_contiguous) {
device.backend().CallKernel<CopyKernel>(out_contiguous, out);
}
}
};
CHAINERX_CUDA_REGISTER_KERNEL(DotKernel, CudaDotKernel);
} // namespace cuda
} // namespace chainerx
|
2b201bc0add368217330f1043424914b2955a2f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define W 500
#define H 500
#define TX 32 // number of threads per block along x-axis
#define TY 32 // number of threads per block along y-axis
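// Clamp an integer to the displayable intensity range [0, 255].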
__device__
unsigned char clip(int n) {
return n > 255 ? 255 : (n < 0 ? 0 : n);
}
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
const int i = r * w + c;
if ((c >= w) || (r >= h)) return;
// compute the distance (in pixel spacings)
const int d = sqrtf((c - pos.x) * (c - pos.x) + (r - pos.y) * (r - pos.y));
// convert distance to intensity value on interval [0, 255]
const unsigned char intensity = clip(255 - d);
d_out[i].x = intensity; // red channel
d_out[i].y = intensity; // green channel
d_out[i].z = 0; // blue channel
d_out[i].w = 255; // fully opaque (alpha channel)
}
int main() {
uchar4 *out = (uchar4*)calloc(W*H, sizeof(uchar4));
uchar4 *d_out; // pointer for device array
hipMalloc(&d_out, W * H * sizeof(uchar4));
const int2 pos = {0, 0}; // set reference position
const dim3 blockSize(TX, TY);
const int bx = (W + TX - 1)/TX;
const int by = (H + TY - 1)/TY;
const dim3 gridSize = dim3(bx, by);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, W, H, pos);
// copy the results to host
hipMemcpy(out, d_out, W*H*sizeof(uchar4), hipMemcpyDeviceToHost);
hipFree(d_out);
free(out);
return 0;
}
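// ---------------------------------------------------------------------------
// Editor's note (hedged sketch): the host code above never checks the
// hipError_t returned by hipMalloc/hipMemcpy, and a failed kernel launch would
// go unnoticed. A minimal checking macro in the spirit of the CUDA_ERROR
// helper further down in this collection could look like this; HIP_CHECK is an
// illustrative name and assumes <cstdio>/<cstdlib> are available.
#define HIP_CHECK(call)                                                        \
    do {                                                                       \
        hipError_t err_ = (call);                                              \
        if (err_ != hipSuccess) {                                              \
            printf("HIP error: %s at %s:%d\n",                                 \
                   hipGetErrorString(err_), __FILE__, __LINE__);               \
            exit(1);                                                           \
        }                                                                      \
    } while (0)
// Illustrative use: HIP_CHECK(hipMalloc(&d_out, W * H * sizeof(uchar4)));
// and HIP_CHECK(hipGetLastError()); immediately after the kernel launch.
// ---------------------------------------------------------------------------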
| 2b201bc0add368217330f1043424914b2955a2f9.cu | #define W 500
#define H 500
#define TX 32 // number of threads per block along x-axis
#define TY 32 // number of threads per block along y-axis
__device__
unsigned char clip(int n) {
return n > 255 ? 255 : (n < 0 ? 0 : n);
}
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
const int i = r * w + c;
if ((c >= w) || (r >= h)) return;
// compute the distance (in pixel spacings)
const int d = sqrtf((c - pos.x) * (c - pos.x) + (r - pos.y) * (r - pos.y));
// convert distance to intensity value on interval [0, 255]
const unsigned char intensity = clip(255 - d);
d_out[i].x = intensity; // red channel
d_out[i].y = intensity; // green channel
d_out[i].z = 0; // blue channel
d_out[i].w = 255; // fully opaque (alpha channel)
}
int main() {
uchar4 *out = (uchar4*)calloc(W*H, sizeof(uchar4));
uchar4 *d_out; // pointer for device array
cudaMalloc(&d_out, W * H * sizeof(uchar4));
const int2 pos = {0, 0}; // set reference position
const dim3 blockSize(TX, TY);
const int bx = (W + TX - 1)/TX;
const int by = (H + TY - 1)/TY;
const dim3 gridSize = dim3(bx, by);
distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, pos);
// copy the results to host
cudaMemcpy(out, d_out, W*H*sizeof(uchar4), cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
}
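// ---------------------------------------------------------------------------
// Editor's note (hedged sketch): `out` is copied back from the device and then
// freed without being looked at. If the distance image is meant to be
// inspected, writing it out as a binary PPM is a simple option; writePPM is an
// illustrative helper, not part of the original program, and assumes <stdio.h>.
void writePPM(const char* path, const uchar4* img, int w, int h) {
    FILE* f = fopen(path, "wb");
    if (!f) return;
    fprintf(f, "P6\n%d %d\n255\n", w, h);   // PPM header: magic, size, max value
    for (int i = 0; i < w * h; ++i) {
        unsigned char rgb[3] = { img[i].x, img[i].y, img[i].z };
        fwrite(rgb, 1, 3, f);               // PPM has no alpha channel
    }
    fclose(f);
}
// Illustrative use in main(), before free(out): writePPM("distance.ppm", out, W, H);
// ---------------------------------------------------------------------------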
|
fc7e2ee3bb92c968a5249f80a1810dc85afbf251.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ker_dense_to_sparse_block_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
const unsigned *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE*sizeof(unsigned));
int bsize = XSIZE*YSIZE;
float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE*sizeof(float));
float *trg = NULL;
hipMalloc(&trg, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(ker_dense_to_sparse_block_add, dim3(gridBlock), dim3(threadBlock), 0, 0, n, idx, bsize, src, trg);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(ker_dense_to_sparse_block_add, dim3(gridBlock), dim3(threadBlock), 0, 0, n, idx, bsize, src, trg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(ker_dense_to_sparse_block_add, dim3(gridBlock), dim3(threadBlock), 0, 0, n, idx, bsize, src, trg);
}
hipDeviceSynchronize(); // wait for the queued kernels to finish before reading the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fc7e2ee3bb92c968a5249f80a1810dc85afbf251.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ker_dense_to_sparse_block_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
const unsigned *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE*sizeof(unsigned));
int bsize = XSIZE*YSIZE;
float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE*sizeof(float));
float *trg = NULL;
cudaMalloc(&trg, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ker_dense_to_sparse_block_add<<<gridBlock,threadBlock>>>(n,idx,bsize,src,trg);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ker_dense_to_sparse_block_add<<<gridBlock,threadBlock>>>(n,idx,bsize,src,trg);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ker_dense_to_sparse_block_add<<<gridBlock,threadBlock>>>(n,idx,bsize,src,trg);
}
cudaDeviceSynchronize(); // wait for the queued kernels to finish before reading the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
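// ---------------------------------------------------------------------------
// Editor's note (hedged sketch): kernel launches are asynchronous, so the
// steady_clock timings above include launch overhead and, without the
// synchronize before `end`, would mostly measure enqueue time. CUDA events
// bracket GPU work only and avoid host-clock jitter; the fragment below shows
// the pattern for one timed batch and reuses the harness's variable names
// purely for illustration.
/*
    cudaEvent_t evStart, evStop;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);
    cudaEventRecord(evStart);
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        ker_dense_to_sparse_block_add<<<gridBlock,threadBlock>>>(n,idx,bsize,src,trg);
    }
    cudaEventRecord(evStop);
    cudaEventSynchronize(evStop);                 // wait for the batch to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, evStart, evStop);   // elapsed GPU time in milliseconds
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);
*/
// ---------------------------------------------------------------------------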
176c091fc125cf90b1cf49becae0bf79fc259e8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "TransferHelper.h"
#include "TgaReader.h"
#include <cassert>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include "CudaHelper.h"
#include <cstring>
int TransferHelper::m_intArea[gGridSizeExternal * gGridSizeExternal];
float TransferHelper::m_floatArea[gGridSizeExternal * gGridSizeExternal];
UnsignedArray TransferHelper::UploadPicture(TgaReader* reader, unsigned char boundaryValue)
{
assert(reader->GetWidth() == gGridSizeInternal);
assert(reader->GetHeight() == gGridSizeInternal);
unsigned char* internalPixelInformation = reader->GetPixels();
for(int row = 0; row < gGridSizeExternal; ++row)
for(int column = 0; column < gGridSizeExternal; ++column)
{
unsigned char destinationValue = boundaryValue;
if ((row != 0) && (row != gGridSizeExternal - 1) && (column != 0) && (column != gGridSizeExternal - 1))
destinationValue = internalPixelInformation[3 * ((column - 1) + gGridSizeInternal * (row - 1))];
m_intArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
hipMemcpy2D(memory, pitch, m_intArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
UnsignedArray result;
result.m_array = (unsigned int*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::UploadPictureAsFloat(TgaReader* reader, float boundaryValue, float minValueMapped,
float maxValueMapped)
{
assert(reader->GetWidth() == gGridSizeInternal);
assert(reader->GetHeight() == gGridSizeInternal);
unsigned char* internalPixelInformation = reader->GetPixels();
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float destinationValue = boundaryValue;
if ((row != 0) && (row != gGridSizeExternal - 1) && (column != 0) && (column != gGridSizeExternal - 1))
{
destinationValue = internalPixelInformation[3 * ((column - 1) + gGridSizeInternal * (row - 1))];
destinationValue = minValueMapped + (maxValueMapped - minValueMapped) * destinationValue / 255.0f;
}
m_floatArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
hipMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::ReserveFloatMemory()
{
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
// We fill all with zero at the beginning.
memset(m_floatArea, 0, gGridSizeExternal * gGridSizeExternal * 4);
hipMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
UnsignedArray TransferHelper::ReserveUnsignedMemory()
{
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
// We fill all with zero at the beginning.
memset(m_intArea, 0, gGridSizeExternal * gGridSizeExternal * 4);
hipMemcpy2D(memory, pitch, m_intArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
UnsignedArray result;
result.m_array = (unsigned int*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::UpfrontFilledValue(float value)
{
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
m_floatArea[column + row * gGridSizeExternal] = value;
}
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
hipMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::BuildHorizontalGradient(float startMax, int direction)
{
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float destinationValue;
if (direction == 1)
destinationValue = startMax * ((float)column) / (gGridSizeExternal - 1);
else
destinationValue = startMax - startMax * ((float)column) / (gGridSizeExternal - 1);
m_floatArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
hipMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::BuildVerticalGradient(float startMax, int direction)
{
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float destinationValue;
if (direction == 1)
destinationValue = startMax * ((float)row) / (gGridSizeExternal - 1);
else
destinationValue = startMax - startMax * ((float)row) / (gGridSizeExternal - 1);
m_floatArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
hipMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::BuildRadialGradient(float startMax, int direction)
{
float maxDistance = sqrtf(2) * gGridSizeExternal / 2.0f;
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float distance = sqrtf((float)((row - gGridSizeExternal / 2) * (row - gGridSizeExternal / 2) + (column - gGridSizeExternal / 2) * (column - gGridSizeExternal / 2)));
distance /= maxDistance;
if (direction == 1)
distance = 1.0f - distance;
m_floatArea[column + row * gGridSizeExternal] = startMax * distance;
}
// Allocate device memory.
void* memory;
size_t pitch;
hipMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
hipMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
__global__ void CopyData(float* sourceArray, size_t sourceStride, float* destinationArray, size_t destinationStride)
{
int baseX = (threadIdx.x + blockIdx.x * blockDim.x) + 1;
int baseY = (threadIdx.y + blockIdx.y * blockDim.y) + 1;
destinationArray[baseX + baseY * destinationStride] = sourceArray[baseX + baseY * sourceStride];
// Here we have to deal with the boundaries.
if (baseX == 1)
{
destinationArray[baseY * destinationStride] = sourceArray[ baseY * sourceStride];
}
if (baseY == 1)
{
destinationArray[baseX ] = sourceArray[baseX ];
}
if (baseX == gGridSizeExternal - 2)
{
destinationArray[(gGridSizeExternal - 1) + baseY * destinationStride] = sourceArray[(gGridSizeExternal - 1) + baseY * sourceStride];
}
if (baseY == gGridSizeExternal - 2)
{
destinationArray[baseX + (gGridSizeExternal - 1) * destinationStride] = sourceArray[baseX + (gGridSizeExternal - 1) * sourceStride];
}
// The 4 corner cases.
if ((baseX == 1) && (baseY == 1))
{
destinationArray[0] = sourceArray[0];
destinationArray[gGridSizeExternal - 1] = sourceArray[gGridSizeExternal - 1];
destinationArray[(gGridSizeExternal - 1) * destinationStride] = sourceArray[(gGridSizeExternal - 1) * sourceStride];
destinationArray[(gGridSizeExternal - 1) + (gGridSizeExternal - 1) * destinationStride] = sourceArray[(gGridSizeExternal - 1) + (gGridSizeExternal - 1) * sourceStride];
}
}
void TransferHelper::CopyDataFromTo(FloatArray source, FloatArray destination)
{
assert(source.m_array);
assert(destination.m_array);
CopyData CUDA_DECORATOR_LOGIC (source.m_array, source.m_stride, destination.m_array, destination.m_stride);
}
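// ---------------------------------------------------------------------------
// Editor's note (hedged sketch): every Upload/Reserve/Build method above
// repeats the same hipMallocPitch + hipMemcpy2D + pitch/4 sequence. A small
// file-local helper could factor that out; UploadFloatArea is an illustrative
// name, and like the original code it does not check the HIP return values.
static FloatArray UploadFloatArea(const float* hostArea)
{
    void* memory;
    size_t pitch;
    hipMallocPitch(&memory, &pitch, gGridSizeExternal * sizeof(float), gGridSizeExternal);
    hipMemcpy2D(memory, pitch, hostArea, sizeof(float) * gGridSizeExternal,
                sizeof(float) * gGridSizeExternal, gGridSizeExternal, hipMemcpyHostToDevice);
    FloatArray result;
    result.m_array = (float*)memory;
    result.m_stride = pitch / sizeof(float);   // stride in elements, as above
    return result;
}
// Each float-returning method could then end with: return UploadFloatArea(m_floatArea);
// ---------------------------------------------------------------------------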
| 176c091fc125cf90b1cf49becae0bf79fc259e8d.cu | #include "TransferHelper.h"
#include "TgaReader.h"
#include <cassert>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include "CudaHelper.h"
#include <cstring>
int TransferHelper::m_intArea[gGridSizeExternal * gGridSizeExternal];
float TransferHelper::m_floatArea[gGridSizeExternal * gGridSizeExternal];
UnsignedArray TransferHelper::UploadPicture(TgaReader* reader, unsigned char boundaryValue)
{
assert(reader->GetWidth() == gGridSizeInternal);
assert(reader->GetHeight() == gGridSizeInternal);
unsigned char* internalPixelInformation = reader->GetPixels();
for(int row = 0; row < gGridSizeExternal; ++row)
for(int column = 0; column < gGridSizeExternal; ++column)
{
unsigned char destinationValue = boundaryValue;
if ((row != 0) && (row != gGridSizeExternal - 1) && (column != 0) && (column != gGridSizeExternal - 1))
destinationValue = internalPixelInformation[3 * ((column - 1) + gGridSizeInternal * (row - 1))];
m_intArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
cudaMemcpy2D(memory, pitch, m_intArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
UnsignedArray result;
result.m_array = (unsigned int*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::UploadPictureAsFloat(TgaReader* reader, float boundaryValue, float minValueMapped,
float maxValueMapped)
{
assert(reader->GetWidth() == gGridSizeInternal);
assert(reader->GetHeight() == gGridSizeInternal);
unsigned char* internalPixelInformation = reader->GetPixels();
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float destinationValue = boundaryValue;
if ((row != 0) && (row != gGridSizeExternal - 1) && (column != 0) && (column != gGridSizeExternal - 1))
{
destinationValue = internalPixelInformation[3 * ((column - 1) + gGridSizeInternal * (row - 1))];
destinationValue = minValueMapped + (maxValueMapped - minValueMapped) * destinationValue / 255.0f;
}
m_floatArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
cudaMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::ReserveFloatMemory()
{
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
// We fill all with zero at the beginning.
memset(m_floatArea, 0, gGridSizeExternal * gGridSizeExternal * 4);
cudaMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
UnsignedArray TransferHelper::ReserveUnsignedMemory()
{
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
// We fill all with zero at the beginning.
memset(m_intArea, 0, gGridSizeExternal * gGridSizeExternal * 4);
cudaMemcpy2D(memory, pitch, m_intArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
UnsignedArray result;
result.m_array = (unsigned int*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::UpfrontFilledValue(float value)
{
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
m_floatArea[column + row * gGridSizeExternal] = value;
}
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
cudaMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::BuildHorizontalGradient(float startMax, int direction)
{
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float destinationValue;
if (direction == 1)
destinationValue = startMax * ((float)column) / (gGridSizeExternal - 1);
else
destinationValue = startMax - startMax * ((float)column) / (gGridSizeExternal - 1);
m_floatArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
cudaMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::BuildVerticalGradient(float startMax, int direction)
{
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float destinationValue;
if (direction == 1)
destinationValue = startMax * ((float)row) / (gGridSizeExternal - 1);
else
destinationValue = startMax - startMax * ((float)row) / (gGridSizeExternal - 1);
m_floatArea[column + row * gGridSizeExternal] = destinationValue;
}
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
cudaMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
FloatArray TransferHelper::BuildRadialGradient(float startMax, int direction)
{
float maxDistance = sqrtf(2) * gGridSizeExternal / 2.0f;
for (int row = 0; row < gGridSizeExternal; ++row)
for (int column = 0; column < gGridSizeExternal; ++column)
{
float distance = sqrtf((float)((row - gGridSizeExternal / 2) * (row - gGridSizeExternal / 2) + (column - gGridSizeExternal / 2) * (column - gGridSizeExternal / 2)));
distance /= maxDistance;
if (direction == 1)
distance = 1.0f - distance;
m_floatArea[column + row * gGridSizeExternal] = startMax * distance;
}
// Allocate device memory.
void* memory;
size_t pitch;
cudaMallocPitch(&memory, &pitch, gGridSizeExternal * 4, gGridSizeExternal);
cudaMemcpy2D(memory, pitch, m_floatArea, 4 * gGridSizeExternal, 4 * gGridSizeExternal, gGridSizeExternal, cudaMemcpyHostToDevice);
pitch /= 4;
FloatArray result;
result.m_array = (float*)memory;
result.m_stride = pitch;
return result;
}
__global__ void CopyData(float* sourceArray, size_t sourceStride, float* destinationArray, size_t destinationStride)
{
int baseX = (threadIdx.x + blockIdx.x * blockDim.x) + 1;
int baseY = (threadIdx.y + blockIdx.y * blockDim.y) + 1;
destinationArray[baseX + baseY * destinationStride] = sourceArray[baseX + baseY * sourceStride];
// Here we have to deal with the boundaries.
if (baseX == 1)
{
destinationArray[baseY * destinationStride] = sourceArray[ baseY * sourceStride];
}
if (baseY == 1)
{
destinationArray[baseX ] = sourceArray[baseX ];
}
if (baseX == gGridSizeExternal - 2)
{
destinationArray[(gGridSizeExternal - 1) + baseY * destinationStride] = sourceArray[(gGridSizeExternal - 1) + baseY * sourceStride];
}
if (baseY == gGridSizeExternal - 2)
{
destinationArray[baseX + (gGridSizeExternal - 1) * destinationStride] = sourceArray[baseX + (gGridSizeExternal - 1) * sourceStride];
}
// The 4 corner cases.
if ((baseX == 1) && (baseY == 1))
{
destinationArray[0] = sourceArray[0];
destinationArray[gGridSizeExternal - 1] = sourceArray[gGridSizeExternal - 1];
destinationArray[(gGridSizeExternal - 1) * destinationStride] = sourceArray[(gGridSizeExternal - 1) * sourceStride];
destinationArray[(gGridSizeExternal - 1) + (gGridSizeExternal - 1) * destinationStride] = sourceArray[(gGridSizeExternal - 1) + (gGridSizeExternal - 1) * sourceStride];
}
}
void TransferHelper::CopyDataFromTo(FloatArray source, FloatArray destination)
{
assert(source.m_array);
assert(destination.m_array);
CopyData CUDA_DECORATOR_LOGIC (source.m_array, source.m_stride, destination.m_array, destination.m_stride);
}
|
527c23c33229c557709a8ace4dd8b0582458217e.hip | // !!! This is a file automatically generated by hipify!!!
#include "helpers.cuh"
void CUDA_ERROR( hipError_t err){
if (err != hipSuccess) {
printf("CUDA ERROR: %s, exiting\n", hipGetErrorString(err));
exit(-1);
}
}
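// ---------------------------------------------------------------------------
// Editor's note (hedged usage sketch): CUDA_ERROR only sees errors for calls
// whose hipError_t is passed to it, so kernel launches still need an explicit
// hipGetLastError()/hipDeviceSynchronize(). The call sites below are
// illustrative, not taken from this project:
//
//     CUDA_ERROR(hipMalloc(&d_buf, bytes));
//     hipLaunchKernelGGL(myKernel, grid, block, 0, 0, d_buf);
//     CUDA_ERROR(hipGetLastError());        // launch-configuration errors
//     CUDA_ERROR(hipDeviceSynchronize());   // errors raised during execution
// ---------------------------------------------------------------------------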
| 527c23c33229c557709a8ace4dd8b0582458217e.cu | #include "helpers.cuh"
void CUDA_ERROR( cudaError_t err){
if (err != cudaSuccess) {
printf("CUDA ERROR: %s, exiting\n", cudaGetErrorString(err));
exit(-1);
}
}
|
29132e1a7d669c20cd2cd00326db2c67c2089f42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
LICENCE
*/
//mps_gpu_cudaker.h
///implementation of cuda kernel functions
#include <cmath>
#include <cstdio>
#include <cassert>
#include "mps_gpu_cudaker.h"
#include "typedef.h"
#include "common.h"
#include "MPS_GPU.h"
namespace cudaker
{
inline hipError_t checkCuda(hipError_t result)
{
#ifdef DEBUG
if(result != hipSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
inline hipblasStatus_t checkCublas(hipblasStatus_t result, const char* msg)
{
#ifdef DEBUG
if(result != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "cublas Runtime Error: %s\n", msg);
assert(result == HIPBLAS_STATUS_SUCCESS);
}
#endif
return result;
}
__global__ void kerVecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[i] = src[i];
}
}
__global__ void kerAxpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
z[i] = a * x[i] + y[i];
}
}
__global__ void kerMatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += mat[i*n+j] * vec[j];
}
des[i] = _tmp;
}
}
__global__ void kerVecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i == 0)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += vec1[j] * vec2[j];
}
des = _tmp;
}
}
void VecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerVecCpy), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, src, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void Axpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
hipLaunchKernelGGL(( kerAxpy), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, z, x, y, a, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("Axpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("Axpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void MatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerMatVec), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, mat, vec, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("MV -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("MV -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void VecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerVecVec), dim3(1), dim3(NUM_THREADS), 0, 0, des, vec1, vec2, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("VV -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("VV -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
void CG(const mytype::real* const A, mytype::real* const x, const mytype::real* const b, const mytype::integer n)
{
const mytype::real _ZERO = 0.0;
const mytype::real _P_ONE = 1.0;
const mytype::real _N_ONE = -1.0;
int _num;
mytype::real _rrold;
mytype::real _rrnew;
mytype::real _alpha;
mytype::real _rn_over_ro;
/*-----device memory-----*/
mytype::real* dev_A;
mytype::real* dev_x;
mytype::real* dev_b;
mytype::real* dev_Ap;
mytype::real* dev_p;
mytype::real* dev_r;
#ifdef DEBUG
float time;
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventRecord(startEvent, 0) );
#endif
checkCuda( hipMalloc(&dev_A, n*n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_x, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_b, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_Ap, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_p, n*sizeof(mytype::real)) );
checkCuda( hipMalloc(&dev_r, n*sizeof(mytype::real)) );
#ifdef DEBUG
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for MemAlloc: %f ms\n",time);
#endif
#ifdef DEBUG
checkCuda( hipEventRecord(startEvent, 0) );
#endif
checkCuda( hipMemcpy(dev_A, A, n*n*sizeof(mytype::real), hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(dev_x, x, n*sizeof(mytype::real), hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(dev_b, b, n*sizeof(mytype::real), hipMemcpyHostToDevice) );
#ifdef DEBUG
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for Memcpy: %f ms\n",time);
#endif
/*-----------------------*/
/*-----CG by using cublas-----*/
hipblasHandle_t handle;
checkCublas( hipblasCreate(&handle), "create" );
///r = b - A*x
checkCublas( hipblasScopy(handle, n, dev_b, 1, dev_r, 1), "Dcopy1" );
checkCublas( hipblasSgemv(handle, HIPBLAS_OP_N, n, n, &_N_ONE, dev_A, n, dev_x, 1, &_P_ONE, dev_r, 1), "Dgemv1" );
///p = r
checkCublas( hipblasScopy(handle, n, dev_r, 1, dev_p, 1), "Dcopy2" );
///_rrold = r*r
checkCublas( hipblasSdot(handle, n, dev_r, 1, dev_r, 1, &_rrold), "Ddot1" );
_num = 0;
while( _rrold > mytype::EPS_BY_EPS )
{
///Ap = A*p
checkCublas( hipblasSgemv(handle, HIPBLAS_OP_N, n, n, &_P_ONE, dev_A, n, dev_p, 1, &_ZERO, dev_Ap, 1), "Dgemv2" );
///_alpha = _rrold / Ap*p
checkCublas( hipblasSdot(handle, n, dev_Ap, 1, dev_p, 1, &_alpha), "Ddot2" );
_alpha = _rrold / _alpha;
///x = x + _alpha*p
checkCublas( hipblasSaxpy(handle, n, &_alpha, dev_p, 1, dev_x, 1 ), "Daxpy1" );
///r = r - _alpha*Ap
_alpha = -_alpha;
checkCublas( hipblasSaxpy(handle, n, &_alpha, dev_Ap, 1, dev_r, 1 ), "Daxpy2" );
///_rrnew = r*r
checkCublas( hipblasSdot(handle, n, dev_r, 1, dev_r, 1, &_rrnew), "Ddot2" );
///_rn_over_ro = _rrnew / _rrold
_rn_over_ro = _rrnew / _rrold;
///p = _rn_over_ro*p + r
checkCublas( hipblasSscal(handle, n, &_rn_over_ro, dev_p, 1), "Dscal1" );
checkCublas( hipblasSaxpy(handle, n, &_P_ONE, dev_r, 1, dev_p, 1 ), "Daxpy3" );
///_rrold = _rrnew
_rrold = _rrnew;
_num++;
//printf("CONVERGENCE -> RESIDUAL: %.2e\n",_rrnew);
}
checkCuda( hipMemcpy(x, dev_x, n*sizeof(mytype::real), hipMemcpyDeviceToHost) );
checkCublas( hipblasDestroy(handle), "destroy");
/*----------------------------*/
/*-----device memory-----*/
#ifdef DEBUG
checkCuda( hipEventRecord(startEvent, 0) );
#endif
checkCuda( hipFree(dev_A) );
checkCuda( hipFree(dev_x) );
checkCuda( hipFree(dev_b) );
checkCuda( hipFree(dev_Ap) );
checkCuda( hipFree(dev_p) );
checkCuda( hipFree(dev_r) );
#ifdef DEBUG
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for freeMem: %f ms\n",time);
#endif
/*-----------------------*/
#ifdef DEBUG
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
#endif
printf(" CG -> times: %d \n", _num);
}
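// ---------------------------------------------------------------------------
// Editor's note: the loop above is the standard (unpreconditioned) conjugate
// gradient recurrence, written here for reference with the same variable
// roles as the code:
//     alpha_k = (r_k . r_k) / (p_k . A p_k)
//     x_{k+1} = x_k + alpha_k * p_k
//     r_{k+1} = r_k - alpha_k * A p_k
//     beta_k  = (r_{k+1} . r_{k+1}) / (r_k . r_k)      // _rn_over_ro
//     p_{k+1} = r_{k+1} + beta_k * p_k
// The iteration stops once r.r falls below EPS_BY_EPS, i.e. ||r|| < EPS.
// ---------------------------------------------------------------------------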
__global__ void kerSort_i(mytype::integer* const des,
const mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_i(mytype::integer* const des,
const mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerSort_i), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, dev_p, dev_i_index, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerSort_d(mytype::real* const des,
const mytype::real* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_d(mytype::real* const des,
const mytype::real* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerSort_d), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, dev_p, dev_i_index, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerSort_i3(mytype::int3* const des,
const mytype::int3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_i3(mytype::int3* const des,
const mytype::int3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerSort_i3), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, dev_p, dev_i_index, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerSort_d3(mytype::real3* const des,
const mytype::real3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_d3(mytype::real3* const des,
const mytype::real3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
hipLaunchKernelGGL(( kerSort_d3), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, des, dev_p, dev_i_index, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerSort_normal(mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
dev_p[i] = dev_i_index[dev_p[i]];
}
}
void dev_sort_normal( mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n )
{
hipLaunchKernelGGL(( kerSort_normal), dim3((n+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_p, dev_i_index, n);
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("cpy -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("cpy -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__device__ inline mytype::real dev_d_weight( const mytype::real _r0,
const mytype::real _r )
{
//danger when _r == 0
if(_r >= _r0) return 0.0;
else return (_r0 / _r - 1.0);
}
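// ---------------------------------------------------------------------------
// Editor's note (hedged sketch): as the comment above warns, the MPS kernel
// weight w(r) = r0/r - 1 is singular at r == 0, which can occur if two
// particles ever coincide. One defensive variant caps the distance at a small
// floor before dividing; dev_d_weight_safe and the 1.0e-8f floor are
// illustrative choices, not part of the original solver.
__device__ inline mytype::real dev_d_weight_safe( const mytype::real _r0,
                                                  const mytype::real _r )
{
    if(_r >= _r0) return 0.0;
    const mytype::real _r_clamped = (_r > 1.0e-8f) ? _r : 1.0e-8f;
    return (_r0 / _r_clamped - 1.0);
}
// ---------------------------------------------------------------------------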
__global__ void kerCal_n( mytype::real* const dev_d_n,
const mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_rzero,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
mytype::real _n = 0.0f;
mytype::real3 _pos_i = dev_d3_pos[i];
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
const mytype::integer __offset = 28 * dev_i_cell_list[i];
const mytype::integer __num = dev_i_link_cell[__offset];
for(mytype::integer dir=1;dir<=__num;dir++)
{
mytype::integer __cell = dev_i_link_cell[__offset + dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real __rr = (dev_d3_pos[j].x - _pos_i.x) * (dev_d3_pos[j].x - _pos_i.x)
+ (dev_d3_pos[j].y - _pos_i.y) * (dev_d3_pos[j].y - _pos_i.y)
+ (dev_d3_pos[j].z - _pos_i.z) * (dev_d3_pos[j].z - _pos_i.z);
_n += dev_d_weight( d_rzero, sqrt(__rr) );
}
}
}
}
dev_d_n[i] = _n;
}
}
void dev_cal_n( mytype::real* const dev_d_n,
mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_rzero,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
hipLaunchKernelGGL(( kerCal_n), dim3((i_np+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_d_n,
dev_d3_pos,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
d_rzero,
i_num_cells,
i_np );
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("dev_cal_n -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("dev_cal_n -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerCalDash_tmp ( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_one_over_rho,
const mytype::real d_one_over_nzero,
const mytype::real d_rzero,
const mytype::integer i_dim,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
/*----------pressure gradient part----------*/
mytype::real3 _ret = {0,0,0};
mytype::real3 _pos_i = dev_d3_pos[i];
mytype::real _hat_p = dev_d_press[i];
const mytype::integer _offset = 28 * dev_i_cell_list[i];
const mytype::integer _num = dev_i_link_cell[_offset];
//searching _hat_p (minimum of p in 27 cells)
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
for(mytype::integer dir=1;dir<=_num;dir++)
{
mytype::integer __cell = dev_i_link_cell[_offset + dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
//ignore type 2 particles
//if(dev_i_type[j] != 2)
{
mytype::real __rr = (dev_d3_pos[j].x - _pos_i.x) * (dev_d3_pos[j].x - _pos_i.x)
+ (dev_d3_pos[j].y - _pos_i.y) * (dev_d3_pos[j].y - _pos_i.y)
+ (dev_d3_pos[j].z - _pos_i.z) * (dev_d3_pos[j].z - _pos_i.z);
if( dev_d_press[j] < _hat_p && __rr <= (d_rzero*d_rzero) )
{
_hat_p = dev_d_press[j];
}
}
}
}
}
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
for(mytype::integer dir=1;dir<=_num;dir++)
{
mytype::integer __cell = dev_i_link_cell[_offset+dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real3 __dr;
__dr.x = dev_d3_pos[j].x - _pos_i.x;
__dr.y = dev_d3_pos[j].y - _pos_i.y;
__dr.z = dev_d3_pos[j].z - _pos_i.z;
mytype::real __rr = __dr.x * __dr.x + __dr.y * __dr.y + __dr.z * __dr.z;
mytype::real __coef = (dev_d_press[j] - _hat_p) / __rr * dev_d_weight(d_rzero, sqrt(__rr));
_ret.x += __coef * __dr.x;
_ret.y += __coef * __dr.y;
_ret.z += __coef * __dr.z;
}
}
}
}
mytype::real _coef = - d_dt * d_one_over_rho * i_dim * d_one_over_nzero;
_ret.x *= _coef;
_ret.y *= _coef;
_ret.z *= _coef;
/*-----------------------------------------*/
/*----------cal tmp part----------*/
//only apply to fluid particles
if(dev_i_type[i] == 0)
{
dev_d3_vel[i].x += _ret.x;
dev_d3_vel[i].y += _ret.y;
dev_d3_vel[i].z += _ret.z;
dev_d3_pos[i].x += d_dt * _ret.x;
dev_d3_pos[i].y += d_dt * _ret.y;
dev_d3_pos[i].z += d_dt * _ret.z;
}
/*--------------------------------*/
}
}
void dev_calDash( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_one_over_rho,
const mytype::real d_one_over_nzero,
const mytype::real d_rzero,
const mytype::integer i_dim,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
hipLaunchKernelGGL(( kerCalDash_tmp), dim3((i_np+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_d3_vel,
dev_d3_pos,
dev_d_press,
dev_i_type,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
d_dt,
d_one_over_rho,
d_one_over_nzero,
d_rzero,
i_dim,
i_num_cells,
i_np );
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("dev_calDash -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("dev_calDash -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerCalPres_fluid_expl( mytype::real* const dev_d_press,
const mytype::real* const dev_d_n,
const mytype::real d_one_over_alpha,
const mytype::real d_nzero,
const mytype::real d_one_over_nzero,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
mytype::real _tmp = d_one_over_alpha * (dev_d_n[i] - d_nzero) * d_one_over_nzero;
dev_d_press[i] = (_tmp > 0.0 ? _tmp : 0.0);
}
}
__global__ void kerCalPres_bd2_expl( mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_normal,
const mytype::integer i_np)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np && dev_i_type[i] == 2) // bounds check: the grid may overshoot i_np
{
dev_d_press[i] = dev_d_press[dev_i_normal[i]];
}
}
void dev_calPres_expl( mytype::real* const dev_d_press,
const mytype::real* const dev_d_n,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_normal,
const mytype::real d_one_over_alpha,
const mytype::real d_nzero,
const mytype::real d_one_over_nzero,
const mytype::integer i_np )
{
///call routines
hipLaunchKernelGGL(( kerCalPres_fluid_expl), dim3((i_np+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_d_press,
dev_d_n,
d_one_over_alpha,
d_nzero,
d_one_over_nzero,
i_np );
hipLaunchKernelGGL(( kerCalPres_bd2_expl), dim3((i_np+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_d_press,
dev_i_type,
dev_i_normal,
i_np );
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("dev_calPres_expl -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("dev_calPres_expl -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
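// ---------------------------------------------------------------------------
// Editor's note: for reference, the kernel pair above implements the explicit
// MPS equation of state with a non-negativity clamp,
//     P_i = max(0, (1/alpha) * (n_i - n0) / n0),
// and then assigns each type-2 (outer wall) particle the pressure of the
// particle referenced by dev_i_normal.
// ---------------------------------------------------------------------------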
__global__ void kerCalVisc_tmp( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real3 G,
const mytype::real d_dt,
const mytype::real d_2bydim_over_nzerobylambda,
const mytype::real d_rlap,
const mytype::real d_niu,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
if(dev_i_type[i] == 0)
{
mytype::real3 _ret = {0.0, 0.0, 0.0};
mytype::integer _offset = 28 * dev_i_cell_list[i];
mytype::integer _num = dev_i_link_cell[_offset];
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
for(mytype::integer dir=1;dir<=_num;dir++)
{
mytype::integer __cell = dev_i_link_cell[_offset+dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real3 __dr;
__dr.x = dev_d3_pos[j].x - dev_d3_pos[i].x;
__dr.y = dev_d3_pos[j].y - dev_d3_pos[i].y;
__dr.z = dev_d3_pos[j].z - dev_d3_pos[i].z;
mytype::real3 __du;
__du.x = dev_d3_vel[j].x - dev_d3_vel[i].x;
__du.y = dev_d3_vel[j].y - dev_d3_vel[i].y;
__du.z = dev_d3_vel[j].z - dev_d3_vel[i].z;
mytype::real __tmp = dev_d_weight(d_rlap , sqrt( __dr.x*__dr.x + __dr.y*__dr.y + __dr.z*__dr.z ));
_ret.x += __tmp * __du.x;
_ret.y += __tmp * __du.y;
_ret.z += __tmp * __du.z;
}
}
}
}
mytype::real __coef = d_niu * d_2bydim_over_nzerobylambda;
dev_d3_vel[i].x += d_dt * (__coef * _ret.x + G.x);
dev_d3_vel[i].y += d_dt * (__coef * _ret.y + G.y);
dev_d3_vel[i].z += d_dt * (__coef * _ret.z + G.z);
dev_d3_pos[i].x += d_dt * dev_d3_vel[i].x;
dev_d3_pos[i].y += d_dt * dev_d3_vel[i].y;
dev_d3_pos[i].z += d_dt * dev_d3_vel[i].z;
}
}
}
void dev_calVisc_expl( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_2bydim_over_nzerobylambda,
const mytype::real d_rlap,
const mytype::real d_niu,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
hipLaunchKernelGGL(( kerCalVisc_tmp), dim3((i_np+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_d3_vel,
dev_d3_pos,
dev_d_press,
dev_i_type,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
mytype::G,
d_dt,
d_2bydim_over_nzerobylambda,
d_rlap,
d_niu,
i_num_cells,
i_np );
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("dev_calVisc_tmp -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("dev_calVisc_tmp -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
__global__ void kerCalCol_tmp( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_col_dis,
const mytype::real d_col_rate,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
mytype::real3 _crt = {0.0, 0.0, 0.0};
if(dev_i_type[i] == 0)
{
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
const mytype::integer __offset = 28 * dev_i_cell_list[i];
const mytype::integer __num = dev_i_link_cell[__offset];
for(mytype::integer dir=1;dir<=__num;dir++)
{
mytype::integer __cell = dev_i_link_cell[__offset+dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real3 __dr;
__dr.x = dev_d3_pos[j].x - dev_d3_pos[i].x;
__dr.y = dev_d3_pos[j].y - dev_d3_pos[i].y;
__dr.z = dev_d3_pos[j].z - dev_d3_pos[i].z;
mytype::real3 __du;
__du.x = dev_d3_vel[j].x - dev_d3_vel[i].x;
__du.y = dev_d3_vel[j].y - dev_d3_vel[i].y;
__du.z = dev_d3_vel[j].z - dev_d3_vel[i].z;
mytype::real __ds = sqrt(__dr.x*__dr.x + __dr.y*__dr.y + __dr.z*__dr.z);
mytype::real __one_over_ds = 1.0f / __ds;
mytype::real __vabs = 0.5f * __one_over_ds * (__du.x*__dr.x + __du.y*__dr.y + __du.z*__dr.z);
if( (__ds <= d_col_dis) && (__vabs <= 0.0) )
{
_crt.x += d_col_rate * __vabs * __one_over_ds * __dr.x;
_crt.y += d_col_rate * __vabs * __one_over_ds * __dr.y;
_crt.z += d_col_rate * __vabs * __one_over_ds * __dr.z;
}
}
}
}
}
}
dev_d3_vel[i].x += _crt.x;
dev_d3_vel[i].y += _crt.y;
dev_d3_vel[i].z += _crt.z;
dev_d3_pos[i].x += d_dt * _crt.x;
dev_d3_pos[i].y += d_dt * _crt.y;
dev_d3_pos[i].z += d_dt * _crt.z;
}
}
void dev_calCol( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_col_dis,
const mytype::real d_col_rate,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
hipLaunchKernelGGL(( kerCalCol_tmp), dim3((i_np+NUM_THREADS-1)/NUM_THREADS), dim3(NUM_THREADS), 0, 0, dev_d3_vel,
dev_d3_pos,
dev_i_type,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
d_dt,
d_col_dis,
d_col_rate,
i_num_cells,
i_np );
#ifdef DEBUG
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if(errSync != hipSuccess) printf("dev_calCol -> Sync kernel error: %s\n", hipGetErrorString(errSync));
if(errAsync != hipSuccess) printf("dev_calCol -> Async kernel error: %s\n", hipGetErrorString(errAsync));
#endif
}
}//namespace
| 29132e1a7d669c20cd2cd00326db2c67c2089f42.cu | /*
LICENCE
*/
//mps_gpu_cudaker.h
///implementation of cuda kernel functions
#include <cmath>
#include <cstdio>
#include <cassert>
#include "mps_gpu_cudaker.h"
#include "typedef.h"
#include "common.h"
#include "MPS_GPU.h"
namespace cudaker
{
inline cudaError_t checkCuda(cudaError_t result)
{
#ifdef DEBUG
if(result != cudaSuccess)
{
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
inline cublasStatus_t checkCublas(cublasStatus_t result, const char* msg)
{
#ifdef DEBUG
if(result != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "cublas Runtime Error: %s\n", msg);
assert(result == CUBLAS_STATUS_SUCCESS);
}
#endif
return result;
}
__global__ void kerVecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[i] = src[i];
}
}
__global__ void kerAxpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
z[i] = a * x[i] + y[i];
}
}
__global__ void kerMatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i < n)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += mat[i*n+j] * vec[j];
}
des[i] = _tmp;
}
}
__global__ void kerVecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
mytype::real _tmp = 0.0;
if(i == 0)
{
for(mytype::integer j=0;j<n;j++)
{
_tmp += vec1[j] * vec2[j];
}
des = _tmp;
}
}
void VecCpy(mytype::real* const des,
const mytype::real* const src,
const mytype::integer n)
{
kerVecCpy<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, src, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void Axpy(mytype::real* const z,
const mytype::real* const x,
const mytype::real* const y,
const mytype::real a, const mytype::integer n)
{
kerAxpy<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(z, x, y, a, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("Axpy -> Sync kernel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("Axpy -> Async kernel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void MatVec(mytype::real* const des,
const mytype::real* const mat,
const mytype::real* const vec,
const mytype::integer n)
{
kerMatVec<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, mat, vec, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("MV -> Sync kernel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("MV -> Async kernel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void VecVec(mytype::real& des,
const mytype::real* const vec1,
const mytype::real* const vec2,
const mytype::integer n)
{
kerVecVec<<<1, NUM_THREADS>>>(des, vec1, vec2, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("VV -> Sync kernel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("VV -> Async kernel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
void CG(const mytype::real* const A, mytype::real* const x, const mytype::real* const b, const mytype::integer n)
{
const mytype::real _ZERO = 0.0;
const mytype::real _P_ONE = 1.0;
const mytype::real _N_ONE = -1.0;
int _num;
mytype::real _rrold;
mytype::real _rrnew;
mytype::real _alpha;
mytype::real _rn_over_ro;
/*-----device memory-----*/
mytype::real* dev_A;
mytype::real* dev_x;
mytype::real* dev_b;
mytype::real* dev_Ap;
mytype::real* dev_p;
mytype::real* dev_r;
#ifdef DEBUG
float time;
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventRecord(startEvent, 0) );
#endif
checkCuda( cudaMalloc(&dev_A, n*n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_x, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_b, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_Ap, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_p, n*sizeof(mytype::real)) );
checkCuda( cudaMalloc(&dev_r, n*sizeof(mytype::real)) );
#ifdef DEBUG
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for MemAlloc: %f ms\n",time);
#endif
#ifdef DEBUG
checkCuda( cudaEventRecord(startEvent, 0) );
#endif
checkCuda( cudaMemcpy(dev_A, A, n*n*sizeof(mytype::real), cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(dev_x, x, n*sizeof(mytype::real), cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(dev_b, b, n*sizeof(mytype::real), cudaMemcpyHostToDevice) );
#ifdef DEBUG
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for Memcpy: %f ms\n",time);
#endif
/*-----------------------*/
/*-----CG by using cublas-----*/
cublasHandle_t handle;
checkCublas( cublasCreate(&handle), "create" );
///r = b - A*x
checkCublas( cublasScopy(handle, n, dev_b, 1, dev_r, 1), "Dcopy1" );
checkCublas( cublasSgemv(handle, CUBLAS_OP_N, n, n, &_N_ONE, dev_A, n, dev_x, 1, &_P_ONE, dev_r, 1), "Dgemv1" );
///p = r
checkCublas( cublasScopy(handle, n, dev_r, 1, dev_p, 1), "Dcopy2" );
///_rrold = r*r
checkCublas( cublasSdot(handle, n, dev_r, 1, dev_r, 1, &_rrold), "Ddot1" );
_num = 0;
while( _rrold > mytype::EPS_BY_EPS )
{
///Ap = A*p
checkCublas( cublasSgemv(handle, CUBLAS_OP_N, n, n, &_P_ONE, dev_A, n, dev_p, 1, &_ZERO, dev_Ap, 1), "Dgemv2" );
///_alpha = _rrold / Ap*p
checkCublas( cublasSdot(handle, n, dev_Ap, 1, dev_p, 1, &_alpha), "Ddot2" );
_alpha = _rrold / _alpha;
///x = x + _alpha*p
checkCublas( cublasSaxpy(handle, n, &_alpha, dev_p, 1, dev_x, 1 ), "Daxpy1" );
///r = r - _alpha*Ap
_alpha = -_alpha;
checkCublas( cublasSaxpy(handle, n, &_alpha, dev_Ap, 1, dev_r, 1 ), "Daxpy2" );
///_rrnew = r*r
checkCublas( cublasSdot(handle, n, dev_r, 1, dev_r, 1, &_rrnew), "Ddot2" );
///_rn_over_ro = _rrnew / _rrold
_rn_over_ro = _rrnew / _rrold;
///p = _rn_over_ro*p + r
checkCublas( cublasSscal(handle, n, &_rn_over_ro, dev_p, 1), "Dscal1" );
checkCublas( cublasSaxpy(handle, n, &_P_ONE, dev_r, 1, dev_p, 1 ), "Daxpy3" );
///_rrold = _rrnew
_rrold = _rrnew;
_num++;
//printf("CONVERGENCE -> RESIDUAL: %.2e\n",_rrnew);
}
checkCuda( cudaMemcpy(x, dev_x, n*sizeof(mytype::real), cudaMemcpyDeviceToHost) );
checkCublas( cublasDestroy(handle), "destroy");
/*----------------------------*/
/*-----device memory-----*/
#ifdef DEBUG
checkCuda( cudaEventRecord(startEvent, 0) );
#endif
checkCuda( cudaFree(dev_A) );
checkCuda( cudaFree(dev_x) );
checkCuda( cudaFree(dev_b) );
checkCuda( cudaFree(dev_Ap) );
checkCuda( cudaFree(dev_p) );
checkCuda( cudaFree(dev_r) );
#ifdef DEBUG
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
printf("time for freeMem: %f ms\n",time);
#endif
/*-----------------------*/
#ifdef DEBUG
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
#endif
printf(" CG -> times: %d \n", _num);
}
__global__ void kerSort_i(mytype::integer* const des,
const mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_i(mytype::integer* const des,
const mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
kerSort_i<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, dev_p, dev_i_index, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
__global__ void kerSort_d(mytype::real* const des,
const mytype::real* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_d(mytype::real* const des,
const mytype::real* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
kerSort_d<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, dev_p, dev_i_index, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
__global__ void kerSort_i3(mytype::int3* const des,
const mytype::int3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_i3(mytype::int3* const des,
const mytype::int3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
kerSort_i3<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, dev_p, dev_i_index, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
__global__ void kerSort_d3(mytype::real3* const des,
const mytype::real3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
des[dev_i_index[i]] = dev_p[i];
}
}
void dev_sort_d3(mytype::real3* const des,
const mytype::real3* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
kerSort_d3<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(des, dev_p, dev_i_index, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
__global__ void kerSort_normal(mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
dev_p[i] = dev_i_index[dev_p[i]];
}
}
void dev_sort_normal( mytype::integer* const dev_p,
const mytype::integer* const dev_i_index,
const mytype::integer n )
{
kerSort_normal<<<(n+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>(dev_p, dev_i_index, n);
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("cpy -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("cpy -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
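/* dev_d_weight: standard MPS kernel (weight) function,
 *   w(r) = r0/r - 1  for 0 < r < r0,   w(r) = 0  for r >= r0.
 * It diverges as r -> 0, so callers skip the self-interaction (j != i). */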
__device__ inline mytype::real dev_d_weight( const mytype::real _r0,
const mytype::real _r )
{
//danger when _r == 0
if(_r >= _r0) return 0.0;
else return (_r0 / _r - 1.0);
}
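/* kerCal_n: particle number density n_i = sum over neighbors j != i of
 * w(d_rzero, |r_j - r_i|). Neighbors are gathered through the linked-cell
 * structure: dev_i_link_cell lists the (up to 27) surrounding cells of the
 * cell containing i, and dev_i_cell_start/end give each cell's particle range. */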
__global__ void kerCal_n( mytype::real* const dev_d_n,
const mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_rzero,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
mytype::real _n = 0.0f;
mytype::real3 _pos_i = dev_d3_pos[i];
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
const mytype::integer __offset = 28 * dev_i_cell_list[i];
const mytype::integer __num = dev_i_link_cell[__offset];
for(mytype::integer dir=1;dir<=__num;dir++)
{
mytype::integer __cell = dev_i_link_cell[__offset + dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real __rr = (dev_d3_pos[j].x - _pos_i.x) * (dev_d3_pos[j].x - _pos_i.x)
+ (dev_d3_pos[j].y - _pos_i.y) * (dev_d3_pos[j].y - _pos_i.y)
+ (dev_d3_pos[j].z - _pos_i.z) * (dev_d3_pos[j].z - _pos_i.z);
_n += dev_d_weight( d_rzero, sqrt(__rr) );
}
}
}
}
dev_d_n[i] = _n;
}
}
void dev_cal_n( mytype::real* const dev_d_n,
mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_rzero,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
kerCal_n<<<(i_np+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>( dev_d_n,
dev_d3_pos,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
d_rzero,
i_num_cells,
i_np );
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("dev_cal_n -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("dev_cal_n -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
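/* kerCalDash_tmp: explicit pressure-gradient ("dash") correction. For each
 * particle, hat_p is the minimum pressure found within radius d_rzero
 * (initialised with its own pressure); the correction
 *   du = -d_dt * d_one_over_rho * i_dim * d_one_over_nzero
 *        * sum_j (p_j - hat_p)/|r_ij|^2 * w(d_rzero,|r_ij|) * r_ij
 * is then added to the velocity of fluid particles (type 0), and d_dt*du to
 * their position. */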
__global__ void kerCalDash_tmp ( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_one_over_rho,
const mytype::real d_one_over_nzero,
const mytype::real d_rzero,
const mytype::integer i_dim,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
/*----------pressure gradient part----------*/
mytype::real3 _ret = {0,0,0};
mytype::real3 _pos_i = dev_d3_pos[i];
mytype::real _hat_p = dev_d_press[i];
const mytype::integer _offset = 28 * dev_i_cell_list[i];
const mytype::integer _num = dev_i_link_cell[_offset];
//searching _hat_p (minimum of p in 27 cells)
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
for(mytype::integer dir=1;dir<=_num;dir++)
{
mytype::integer __cell = dev_i_link_cell[_offset + dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
//ignore type 2 particles
//if(dev_i_type[j] != 2)
{
mytype::real __rr = (dev_d3_pos[j].x - _pos_i.x) * (dev_d3_pos[j].x - _pos_i.x)
+ (dev_d3_pos[j].y - _pos_i.y) * (dev_d3_pos[j].y - _pos_i.y)
+ (dev_d3_pos[j].z - _pos_i.z) * (dev_d3_pos[j].z - _pos_i.z);
if( dev_d_press[j] < _hat_p && __rr <= (d_rzero*d_rzero) )
{
_hat_p = dev_d_press[j];
}
}
}
}
}
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
for(mytype::integer dir=1;dir<=_num;dir++)
{
mytype::integer __cell = dev_i_link_cell[_offset+dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real3 __dr;
__dr.x = dev_d3_pos[j].x - _pos_i.x;
__dr.y = dev_d3_pos[j].y - _pos_i.y;
__dr.z = dev_d3_pos[j].z - _pos_i.z;
mytype::real __rr = __dr.x * __dr.x + __dr.y * __dr.y + __dr.z * __dr.z;
mytype::real __coef = (dev_d_press[j] - _hat_p) / __rr * dev_d_weight(d_rzero, sqrt(__rr));
_ret.x += __coef * __dr.x;
_ret.y += __coef * __dr.y;
_ret.z += __coef * __dr.z;
}
}
}
}
mytype::real _coef = - d_dt * d_one_over_rho * i_dim * d_one_over_nzero;
_ret.x *= _coef;
_ret.y *= _coef;
_ret.z *= _coef;
/*-----------------------------------------*/
/*----------cal tmp part----------*/
//only apply to fluid particles
if(dev_i_type[i] == 0)
{
dev_d3_vel[i].x += _ret.x;
dev_d3_vel[i].y += _ret.y;
dev_d3_vel[i].z += _ret.z;
dev_d3_pos[i].x += d_dt * _ret.x;
dev_d3_pos[i].y += d_dt * _ret.y;
dev_d3_pos[i].z += d_dt * _ret.z;
}
/*--------------------------------*/
}
}
void dev_calDash( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_one_over_rho,
const mytype::real d_one_over_nzero,
const mytype::real d_rzero,
const mytype::integer i_dim,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
kerCalDash_tmp<<<(i_np+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>( dev_d3_vel,
dev_d3_pos,
dev_d_press,
dev_i_type,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
d_dt,
d_one_over_rho,
d_one_over_nzero,
d_rzero,
i_dim,
i_num_cells,
i_np );
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("dev_calDash -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("dev_calDash -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
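/* Explicit pressure update:
 *   kerCalPres_fluid_expl: p_i = max(0, d_one_over_alpha*(n_i - n0)/n0) for every particle;
 *   kerCalPres_bd2_expl:   type-2 (dummy boundary) particles copy the pressure of
 *                          the particle indexed by dev_i_normal[i]. */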
__global__ void kerCalPres_fluid_expl( mytype::real* const dev_d_press,
const mytype::real* const dev_d_n,
const mytype::real d_one_over_alpha,
const mytype::real d_nzero,
const mytype::real d_one_over_nzero,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
mytype::real _tmp = d_one_over_alpha * (dev_d_n[i] - d_nzero) * d_one_over_nzero;
dev_d_press[i] = (_tmp > 0.0 ? _tmp : 0.0);
}
}
__global__ void kerCalPres_bd2_expl( mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_normal,
const mytype::integer i_np)
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(dev_i_type[i] == 2)
{
dev_d_press[i] = dev_d_press[dev_i_normal[i]];
}
}
void dev_calPres_expl( mytype::real* const dev_d_press,
const mytype::real* const dev_d_n,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_normal,
const mytype::real d_one_over_alpha,
const mytype::real d_nzero,
const mytype::real d_one_over_nzero,
const mytype::integer i_np )
{
///call routines
kerCalPres_fluid_expl<<<(i_np+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>( dev_d_press,
dev_d_n,
d_one_over_alpha,
d_nzero,
d_one_over_nzero,
i_np );
kerCalPres_bd2_expl<<<(i_np+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>( dev_d_press,
dev_i_type,
dev_i_normal,
i_np );
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("dev_calPres_expl -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("dev_calPres_expl -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
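/* kerCalVisc_tmp: explicit viscosity + gravity step for fluid particles (type 0):
 *   u_i += d_dt * ( d_niu * d_2bydim_over_nzerobylambda
 *                   * sum_j w(d_rlap,|r_ij|) * (u_j - u_i)  +  G )
 *   x_i += d_dt * u_i
 * using the same 27-cell neighbor search as above. */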
__global__ void kerCalVisc_tmp( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real3 G,
const mytype::real d_dt,
const mytype::real d_2bydim_over_nzerobylambda,
const mytype::real d_rlap,
const mytype::real d_niu,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
if(dev_i_type[i] == 0)
{
mytype::real3 _ret = {0.0, 0.0, 0.0};
mytype::integer _offset = 28 * dev_i_cell_list[i];
mytype::integer _num = dev_i_link_cell[_offset];
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
for(mytype::integer dir=1;dir<=_num;dir++)
{
mytype::integer __cell = dev_i_link_cell[_offset+dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real3 __dr;
__dr.x = dev_d3_pos[j].x - dev_d3_pos[i].x;
__dr.y = dev_d3_pos[j].y - dev_d3_pos[i].y;
__dr.z = dev_d3_pos[j].z - dev_d3_pos[i].z;
mytype::real3 __du;
__du.x = dev_d3_vel[j].x - dev_d3_vel[i].x;
__du.y = dev_d3_vel[j].y - dev_d3_vel[i].y;
__du.z = dev_d3_vel[j].z - dev_d3_vel[i].z;
mytype::real __tmp = dev_d_weight(d_rlap , sqrt( __dr.x*__dr.x + __dr.y*__dr.y + __dr.z*__dr.z ));
_ret.x += __tmp * __du.x;
_ret.y += __tmp * __du.y;
_ret.z += __tmp * __du.z;
}
}
}
}
mytype::real __coef = d_niu * d_2bydim_over_nzerobylambda;
dev_d3_vel[i].x += d_dt * (__coef * _ret.x + G.x);
dev_d3_vel[i].y += d_dt * (__coef * _ret.y + G.y);
dev_d3_vel[i].z += d_dt * (__coef * _ret.z + G.z);
dev_d3_pos[i].x += d_dt * dev_d3_vel[i].x;
dev_d3_pos[i].y += d_dt * dev_d3_vel[i].y;
dev_d3_pos[i].z += d_dt * dev_d3_vel[i].z;
}
}
}
void dev_calVisc_expl( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::real* const dev_d_press,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_2bydim_over_nzerobylambda,
const mytype::real d_rlap,
const mytype::real d_niu,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
kerCalVisc_tmp<<<(i_np+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>( dev_d3_vel,
dev_d3_pos,
dev_d_press,
dev_i_type,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
mytype::G,
d_dt,
d_2bydim_over_nzerobylambda,
d_rlap,
d_niu,
i_num_cells,
i_np );
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("dev_calVisc_tmp -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("dev_calVisc_tmp -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
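/* kerCalCol_tmp: collision handling. For a fluid particle (type 0) that is
 * closer than d_col_dis to a neighbor and approaching it (relative velocity
 * projected on r_ij is <= 0), a repulsive velocity correction proportional to
 * d_col_rate is accumulated; the correction (and d_dt times it) is then added
 * to the particle's velocity and position. */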
__global__ void kerCalCol_tmp( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_col_dis,
const mytype::real d_col_rate,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
const mytype::integer i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < i_np)
{
mytype::real3 _crt = {0.0, 0.0, 0.0};
if(dev_i_type[i] == 0)
{
//searching neighbors
//loop: surrounding cells including itself (totally 27 cells)
//loop: from bottom to top, from back to front, from left to right
const mytype::integer __offset = 28 * dev_i_cell_list[i];
const mytype::integer __num = dev_i_link_cell[__offset];
for(mytype::integer dir=1;dir<=__num;dir++)
{
mytype::integer __cell = dev_i_link_cell[__offset+dir];
if(__cell < i_num_cells)
{
mytype::integer __start = dev_i_cell_start[__cell];
mytype::integer __end = dev_i_cell_end[__cell];
for(mytype::integer j=__start;j<__end;j++)
{
if(j != i)
{
mytype::real3 __dr;
__dr.x = dev_d3_pos[j].x - dev_d3_pos[i].x;
__dr.y = dev_d3_pos[j].y - dev_d3_pos[i].y;
__dr.z = dev_d3_pos[j].z - dev_d3_pos[i].z;
mytype::real3 __du;
__du.x = dev_d3_vel[j].x - dev_d3_vel[i].x;
__du.y = dev_d3_vel[j].y - dev_d3_vel[i].y;
__du.z = dev_d3_vel[j].z - dev_d3_vel[i].z;
mytype::real __ds = sqrt(__dr.x*__dr.x + __dr.y*__dr.y + __dr.z*__dr.z);
mytype::real __one_over_ds = 1.0f / __ds;
mytype::real __vabs = 0.5f * __one_over_ds * (__du.x*__dr.x + __du.y*__dr.y + __du.z*__dr.z);
if( (__ds <= d_col_dis) && (__vabs <= 0.0) )
{
_crt.x += d_col_rate * __vabs * __one_over_ds * __dr.x;
_crt.y += d_col_rate * __vabs * __one_over_ds * __dr.y;
_crt.z += d_col_rate * __vabs * __one_over_ds * __dr.z;
}
}
}
}
}
}
dev_d3_vel[i].x += _crt.x;
dev_d3_vel[i].y += _crt.y;
dev_d3_vel[i].z += _crt.z;
dev_d3_pos[i].x += d_dt * _crt.x;
dev_d3_pos[i].y += d_dt * _crt.y;
dev_d3_pos[i].z += d_dt * _crt.z;
}
}
void dev_calCol( mytype::real3* const dev_d3_vel,
mytype::real3* const dev_d3_pos,
const mytype::integer* const dev_i_type,
const mytype::integer* const dev_i_cell_list,
const mytype::integer* const dev_i_link_cell,
const mytype::integer* const dev_i_cell_start,
const mytype::integer* const dev_i_cell_end,
const mytype::real d_dt,
const mytype::real d_col_dis,
const mytype::real d_col_rate,
const mytype::integer i_num_cells,
const mytype::integer i_np )
{
///call routines
kerCalCol_tmp<<<(i_np+NUM_THREADS-1)/NUM_THREADS, NUM_THREADS>>>( dev_d3_vel,
dev_d3_pos,
dev_i_type,
dev_i_cell_list,
dev_i_link_cell,
dev_i_cell_start,
dev_i_cell_end,
d_dt,
d_col_dis,
d_col_rate,
i_num_cells,
i_np );
#ifdef DEBUG
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if(errSync != cudaSuccess) printf("dev_calCol -> Sync kernnel error: %s\n", cudaGetErrorString(errSync));
if(errAsync != cudaSuccess) printf("dev_calCol -> Async kernnel error: %s\n", cudaGetErrorString(errAsync));
#endif
}
}//namespace
|
7f5735e2aaf81a2cc47585b1c6f2ad631d731127.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* FIXME: Edit this file to complete the functionality of 2D separable
* convolution on the GPU. You may add additional kernel functions
* as necessary.
*/
__global__ void convolve_rows_kernel_naive()
{
return;
}
__global__ void convolve_columns_kernel_naive()
{
return;
}
__global__ void convolve_rows_kernel_optimized()
{
return;
}
__global__ void convolve_columns_kernel_optimized()
{
return;
}
| 7f5735e2aaf81a2cc47585b1c6f2ad631d731127.cu | /* FIXME: Edit this file to complete the functionality of 2D separable
* convolution on the GPU. You may add additional kernel functions
* as necessary.
*/
__global__ void convolve_rows_kernel_naive()
{
return;
}
__global__ void convolve_columns_kernel_naive()
{
return;
}
__global__ void convolve_rows_kernel_optimized()
{
return;
}
__global__ void convolve_columns_kernel_optimized()
{
return;
}
|
32b91be307dc6babe161863829c6443d18ec0a77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layer.hpp"
#include "caffe/solver.hpp"
#include<cfloat>
namespace caffe {
template <typename Dtype>
static __global__ void scale_kernel(int count, int image_dim, Dtype sec_loss_weight, const Dtype *in, const Dtype *coef, Dtype *out)
{
CUDA_KERNEL_LOOP(i, count)
{
int n = i / image_dim;
out[i] = 2 * sec_loss_weight *(coef[n]-1)/ coef[n] * in[i];
}
}
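// compute_sum: one block per sample; each block accumulates the squared entries
// of its image_dim-long slice of `in` into shared memory, tree-reduces them, and
// writes the L2 norm (sqrt of the sum) to out[blockIdx.x].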
template <typename Dtype>
static __global__ void compute_sum(int image_dim, const Dtype *in, Dtype *out)
{
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
buffer[threadIdx.x] = 0;
for (int i = threadIdx.x;i < image_dim;i += blockDim.x)
buffer[threadIdx.x] += in[blockIdx.x*image_dim+i]*in[blockIdx.x*image_dim+i];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
buffer[threadIdx.x] += buffer[threadIdx.x+s];
__syncthreads();
}
if (threadIdx.x == 0)
out[blockIdx.x] = sqrt(buffer[0]);
}
template <typename Dtype>
void Layer<Dtype>::compute_sec_loss(const vector<Blob<Dtype>*>& top, const Dtype sec_loss_weight)
{
vector<shared_ptr<Blob<Dtype> > > sum_;
sum_.resize(top.size());
for (int i=0;i < top.size();i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i%NGPUS]));
int num = top[i]->num();
int channels = top[i]->channels();
int height = top[i]->height();
int width = top[i]->width();
sum_[i].reset(new Blob<Dtype>(num,1,1,1));
hipLaunchKernelGGL(( compute_sum), dim3(num),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
channels*height*width,top[i]->gpu_diff(),sum_[i]->mutable_gpu_data());
if (Solver<Dtype>::iter() % 1000 == 0)
{
Dtype sum = 0;
for (int iter = 0;iter<num;iter++)
sum += sum_[i]->cpu_data()[iter];
LOG(INFO)<<"sum = "<<sum/Dtype(num);
}
hipLaunchKernelGGL(( scale_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(top[i]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[i]->count(), channels*height*width, sec_loss_weight, top[i]->gpu_diff(), sum_[i]->gpu_data(), top[i]->mutable_gpu_sec_diff());
caffe_gpu_scal(top[i]->count(),Dtype(1)/Dtype(num),top[i]->mutable_gpu_sec_diff());
}
CUDA_CHECK(hipSetDevice(Caffe::GPUs[0]));
}
//----------------------------------------- proto <-> memory--------------------
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff)
{
param->Clear();
param->CopyFrom(layer_param_);
param->clear_blobs();
for (int i = 0; i < blobs_.size(); ++i)
blobs_[i]->ToProto(param->add_blobs(), write_diff);
}
INSTANTIATE_CLASS(Layer);
}
| 32b91be307dc6babe161863829c6443d18ec0a77.cu | #include "caffe/layer.hpp"
#include "caffe/solver.hpp"
#include<cfloat>
namespace caffe {
template <typename Dtype>
static __global__ void scale_kernel(int count, int image_dim, Dtype sec_loss_weight, const Dtype *in, const Dtype *coef, Dtype *out)
{
CUDA_KERNEL_LOOP(i, count)
{
int n = i / image_dim;
out[i] = 2 * sec_loss_weight *(coef[n]-1)/ coef[n] * in[i];
}
}
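// compute_sum: one block per sample; each block accumulates the squared entries
// of its image_dim-long slice of `in` into shared memory, tree-reduces them, and
// writes the L2 norm (sqrt of the sum) to out[blockIdx.x].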
template <typename Dtype>
static __global__ void compute_sum(int image_dim, const Dtype *in, Dtype *out)
{
__shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS];
buffer[threadIdx.x] = 0;
for (int i = threadIdx.x;i < image_dim;i += blockDim.x)
buffer[threadIdx.x] += in[blockIdx.x*image_dim+i]*in[blockIdx.x*image_dim+i];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (threadIdx.x < s)
buffer[threadIdx.x] += buffer[threadIdx.x+s];
__syncthreads();
}
if (threadIdx.x == 0)
out[blockIdx.x] = sqrt(buffer[0]);
}
template <typename Dtype>
void Layer<Dtype>::compute_sec_loss(const vector<Blob<Dtype>*>& top, const Dtype sec_loss_weight)
{
vector<shared_ptr<Blob<Dtype> > > sum_;
sum_.resize(top.size());
for (int i=0;i < top.size();i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i%NGPUS]));
int num = top[i]->num();
int channels = top[i]->channels();
int height = top[i]->height();
int width = top[i]->width();
sum_[i].reset(new Blob<Dtype>(num,1,1,1));
compute_sum<<<num,CAFFE_CUDA_NUM_THREADS>>>
(channels*height*width,top[i]->gpu_diff(),sum_[i]->mutable_gpu_data());
if (Solver<Dtype>::iter() % 1000 == 0)
{
Dtype sum = 0;
for (int iter = 0;iter<num;iter++)
sum += sum_[i]->cpu_data()[iter];
LOG(INFO)<<"sum = "<<sum/Dtype(num);
}
scale_kernel<Dtype><<<CAFFE_GET_BLOCKS(top[i]->count()), CAFFE_CUDA_NUM_THREADS>>>
(top[i]->count(), channels*height*width, sec_loss_weight, top[i]->gpu_diff(), sum_[i]->gpu_data(), top[i]->mutable_gpu_sec_diff());
caffe_gpu_scal(top[i]->count(),Dtype(1)/Dtype(num),top[i]->mutable_gpu_sec_diff());
}
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[0]));
}
//----------------------------------------- proto <-> memory--------------------
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff)
{
param->Clear();
param->CopyFrom(layer_param_);
param->clear_blobs();
for (int i = 0; i < blobs_.size(); ++i)
blobs_[i]->ToProto(param->add_blobs(), write_diff);
}
INSTANTIATE_CLASS(Layer);
}
|
b283cb7959ed621a3af060d846481c5ff41b3017.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "base/Range.hh"
__global__ void move(const unsigned int max_steps, double distance, double* x) {
auto start = (blockIdx.x * blockDim.x + threadIdx.x) * max_steps;
for (unsigned int i = 0; i != max_steps; ++i) {
x[start + i] += distance;
}
}
| b283cb7959ed621a3af060d846481c5ff41b3017.cu | #include "base/Range.hh"
__global__ void move(const unsigned int max_steps, double distance, double* x) {
auto start = (blockIdx.x * blockDim.x + threadIdx.x) * max_steps;
for (unsigned int i = 0; i != max_steps; ++i) {
x[start + i] += distance;
}
}
|
d72b8242bc31fa44e0ca4c81356eb30de80bf556.hip | // !!! This is a file automatically generated by hipify!!!
/****
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****/
#include "mpi.h"
#include "mp.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdio.h>
#include "assert.h"
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include "prof.h"
#include <hip/hip_runtime_api.h>
#include <mp/device.cuh>
#include "roctracer/roctx.h"
#define MPI_CHECK(stmt) \
do { \
int result = (stmt); \
if (MPI_SUCCESS != result) { \
char string[MPI_MAX_ERROR_STRING]; \
int resultlen = 0; \
MPI_Error_string(result, string, &resultlen); \
fprintf(stderr, " (%s:%d) MPI check failed with %d (%*s)\n", \
__FILE__, __LINE__, result, resultlen, string); \
exit(-1); \
} \
} while(0)
#define CUDA_CHECK(stmt) \
do { \
hipError_t result = (stmt); \
if (hipSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", \
__FILE__, __LINE__,hipGetErrorString(result));\
exit(-1); \
} \
assert(hipSuccess == result); \
} while (0)
#define MP_CHECK(stmt) \
do { \
int result = (stmt); \
if (0 != result) { \
fprintf(stderr, "[%s:%d] mp call failed \n", \
__FILE__, __LINE__); \
exit(-1); \
} \
assert(0 == result); \
} while (0)
int enable_debug_prints = 0;
#define mp_dbg_msg(FMT, ARGS...) do \
{ \
if (enable_debug_prints) { \
fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \
fflush(stderr); \
} \
} while(0)
#define MAX_SIZE 4096 //128*1024
#define ITER_COUNT_SMALL (2*1024)
#define ITER_COUNT_LARGE 256
//-------------------------------- NVTX -----------------------------------------
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE roctxRangePop();
//-------------------------------------------------------------------------------
struct prof prof_async;
int prof_start = 0;
int prof_idx = 0;
int comm_size, my_rank, peer;
int steps_per_batch = 16, batches_inflight = 4;
mp::mlx5::send_desc_t *tx;
mp::mlx5::send_desc_t *tx_d;
mp::mlx5::wait_desc_t *tx_wait;
mp::mlx5::wait_desc_t *tx_wait_d;
mp::mlx5::wait_desc_t *rx_wait;
mp::mlx5::wait_desc_t *rx_wait_d;
__device__ int counter;
__device__ int clockrate;
__device__ void dummy_kernel(double time)
{
long long int start, stop;
double usec;
start = clock64();
do {
stop = clock64();
usec = ((double)(stop-start)*1000)/((double)clockrate);
counter = usec;
} while(usec < time);
}
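/* exchange_kernel: GPU-side ping-pong using pre-built mp/mlx5 descriptors.
 * Rank 0 (thread 0 only): trigger send i, wait for its completion, then wait on
 * the matching receive; all threads then run dummy_kernel to emulate compute.
 * Rank 1 mirrors this: wait for the receive, compute, then send the reply.
 * __syncthreads()/__threadfence() order the compute with respect to the
 * communication; the kernel assumes a single block (gridDim.x == 1). */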
__global__ void exchange_kernel(int my_rank,
mp::mlx5::send_desc_t *tx_d,
mp::mlx5::wait_desc_t *tx_wait_d,
mp::mlx5::wait_desc_t *rx_wait_d,
int iter_number, double kernel_time)
{
int i;
assert(gridDim.x == 1);
/*
* dummy_kernel + threadfence: simulate some work after wait
*/
for (i=0; i<iter_number; ++i) {
if (!my_rank) {
if (0 == threadIdx.x) {
//Kernel send
mp::device::mlx5::send(tx_d[i]);
mp::device::mlx5::wait(tx_wait_d[i]);
mp::device::mlx5::signal(tx_wait_d[i]);
//Kernel receive
mp::device::mlx5::wait(rx_wait_d[i]);
mp::device::mlx5::signal(rx_wait_d[i]);
}
//Make sure the data has been received correctly before it is used
__syncthreads();
dummy_kernel(kernel_time);
__threadfence();
} else {
if (0 == threadIdx.x) {
//Kernel send
mp::device::mlx5::wait(rx_wait_d[i]);
mp::device::mlx5::signal(rx_wait_d[i]);
}
//Make sure the data has been received correctly before it is used
dummy_kernel(kernel_time);
__threadfence();
// make sure NIC can fetch coherent data
if (0 == threadIdx.x) {
mp::device::mlx5::send(tx_d[i]);
mp::device::mlx5::wait(tx_wait_d[i]);
mp::device::mlx5::signal(tx_wait_d[i]);
}
__syncthreads();
}
}
}
/*application and pack buffers*/
int sreq_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0;
size_t buf_size;
int gpu_id = -1;
int wait_key = 0;
double time_start, time_stop;
hipStream_t stream;
/*mp specific objects*/
mp_request_t *sreq = NULL;
mp_request_t *rreq = NULL;
double sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, struct prof *prof)
{
int i, j, cycle_index, buf_index;
double latency;
double time_start, time_stop;
int sreq_idx = 0, rreq_idx = 0, complete_sreq_idx = 0, complete_rreq_idx = 0;
int sreq_inflight = 0, rreq_inflight = 0;
/*application and pack buffers*/
void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL;
mp_reg_t sreg, rreg;
CUDA_CHECK( hipHostMalloc( (void**)&tx, rreq_max_inflight*sizeof(mp::mlx5::send_desc_t), hipHostMallocMapped ) );
CUDA_CHECK( hipHostGetDevicePointer ( &tx_d, tx, 0 ));
CUDA_CHECK( hipHostMalloc( (void**)&tx_wait, rreq_max_inflight*sizeof(mp::mlx5::wait_desc_t), hipHostMallocMapped ) );
CUDA_CHECK( hipHostGetDevicePointer ( &tx_wait_d, tx_wait, 0 ));
CUDA_CHECK( hipHostMalloc( (void**)&rx_wait, rreq_max_inflight*sizeof(mp::mlx5::wait_desc_t), hipHostMallocMapped ) );
CUDA_CHECK( hipHostGetDevicePointer ( &rx_wait_d, rx_wait, 0 ));
/*
size_t limitValue, freeValue, totalValue;
CUDA_CHECK(hipDeviceGetLimit ( &limitValue, hipLimitMallocHeapSize ));
CUDA_CHECK(hipMemGetInfo( &freeValue, &totalValue));
//if(my_rank) fprintf(stdout, "*** GPU, limit: %zd, total: %zd, free: %zd\n", limitValue, totalValue, freeValue);
*/
CUDA_CHECK(hipMalloc((void **)&sbuf_d, size*iter_count));
CUDA_CHECK(hipMemset(sbuf_d, 0, size*iter_count));
CUDA_CHECK(hipMalloc((void **)&rbuf_d, size*iter_count));
CUDA_CHECK(hipMemset(rbuf_d, 0, size*iter_count));
MP_CHECK(mp_register(sbuf_d, size*iter_count, &sreg));
MP_CHECK(mp_register(rbuf_d, size*iter_count, &rreg));
if (validate) {
mp_dbg_msg("initializing the buffer \n");
CUDA_CHECK(hipMemset(sbuf_d, (size + 1)%CHAR_MAX, size*iter_count));
CUDA_CHECK(hipMemset(rbuf_d, 0, size*iter_count));
CUDA_CHECK(hipDeviceSynchronize());
buf = (char *) calloc(size*iter_count, sizeof(char));
}
time_start = MPI_Wtime();
for (j=0; j<prepost_depth; j++) {
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + (size*j)), size, peer, &rreg, &rreq[rreq_idx]));
//Kernel
MP_CHECK(mp::mlx5::get_descriptors(&rx_wait[rreq_idx], &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
prof_idx = 0;
assert(!(iter_count%steps_per_batch));
for (j = 0; j < iter_count; j += steps_per_batch) {
mp_dbg_msg("[%d] iteration :%d \n", my_rank, j);
for(cycle_index=0; cycle_index < steps_per_batch; cycle_index++)
{
sreq_idx = (j+cycle_index)%sreq_max_inflight;
MP_CHECK(mp_send_prepare((void *)((uintptr_t)sbuf_d + ((j+cycle_index)*size)), size, peer, &sreg, &sreq[sreq_idx]));
MP_CHECK(mp::mlx5::get_descriptors(&tx[sreq_idx], &sreq[sreq_idx]));
MP_CHECK(mp::mlx5::get_descriptors(&tx_wait[sreq_idx], &sreq[sreq_idx]));
sreq_inflight++;
}
//It's the same for both Rank0 and Rank1
hipLaunchKernelGGL(( exchange_kernel), dim3(1),dim3(16),0,stream, my_rank, tx_d+(j%sreq_max_inflight),
tx_wait_d+(j%sreq_max_inflight), rx_wait_d+(j%rreq_max_inflight),
steps_per_batch, kernel_time);
CUDA_CHECK(hipGetLastError());
//if (prof && !my_rank) PROF(prof, prof_idx++);
mp_dbg_msg("[%d] posted send request: %d \n", my_rank, sreq_idx);
mp_dbg_msg("[%d] requests inflight: %d \n", my_rank, sreq_inflight);
//Post others recv
if ((j + prepost_depth) < iter_count) {
for(cycle_index=0; cycle_index < steps_per_batch; cycle_index++)
{
if(rreq_inflight >= rreq_max_inflight)
break;
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + ((j + prepost_depth + cycle_index) * size)), size, peer, &rreg, &rreq[rreq_idx]));
MP_CHECK(mp::mlx5::get_descriptors(&rx_wait[rreq_idx], &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
}
/*synchronize on oldest batch*/
if (sreq_inflight == sreq_max_inflight)
{
mp_dbg_msg("[%d] after waiting on recv, rreq_inflight: %d \n", my_rank, rreq_inflight);
for (cycle_index=0; cycle_index<steps_per_batch; cycle_index++) {
mp_dbg_msg("[%d] waiting on send request: %d \n", my_rank, complete_sreq_idx);
MP_CHECK(mp_wait(&sreq[complete_sreq_idx]));
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_sreq_idx);
complete_sreq_idx = (complete_sreq_idx + 1)%sreq_max_inflight;
sreq_inflight--;
}
}
//The final number of wait will be always the same
if (rreq_inflight == rreq_max_inflight)
{
for (cycle_index=0; cycle_index<steps_per_batch; cycle_index++) {
mp_dbg_msg("[%d] waiting on recv request: %d \n", my_rank, complete_rreq_idx);
MP_CHECK(mp_wait(&rreq[complete_rreq_idx]));
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
}
if (j == (iter_count - steps_per_batch))
{
while (rreq_inflight > 0) {
MP_CHECK(mp_wait(&rreq[complete_rreq_idx]));
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
while (sreq_inflight > 0) {
MP_CHECK(mp_wait(&sreq[complete_sreq_idx]));
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_sreq_idx);
complete_sreq_idx = (complete_sreq_idx + 1)%sreq_max_inflight;
sreq_inflight--;
}
}
/*
if (!my_rank && prof) {
PROF(prof, prof_idx++);
prof_update(prof);
prof_idx = 0;
}
*/
}
// TODO: move validate after timing
if (validate) {
CUDA_CHECK(hipMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), size*iter_count, hipMemcpyDefault));
//CUDA_CHECK(hipDeviceSynchronize());
char *value = (char *)((uintptr_t)buf);
for (i=0; i<size*iter_count; i++) {
if (value[i] != (size + 1)%CHAR_MAX) {
mp_dbg_msg("[%d] validation check failed index: %d expected: %d actual: %d \n",
my_rank, i, (size + 1)%CHAR_MAX, value[i]);
exit(-1);
}
}
free(buf);
}
MPI_Barrier(comm);
CUDA_CHECK(hipStreamSynchronize(stream));
time_stop = MPI_Wtime();
latency = (((time_stop - time_start)*1e6)/(iter_count*2));
CUDA_CHECK(hipDeviceSynchronize());
mp_deregister(&sreg);
mp_deregister(&rreg);
CUDA_CHECK(hipFree(sbuf_d));
CUDA_CHECK(hipFree(rbuf_d));
return latency;
}
int main (int argc, char *argv[])
{
int iter_count = 0, size = 0, dev_count = 0, local_rank = 0, dev_id = 0;
int kernel_time = 20;
int comm_comp_ratio = 0;
int validate = 0;
int max_size = MAX_SIZE;
char *value = getenv("ENABLE_VALIDATION");
if (value != NULL) {
validate = atoi(value);
}
value = getenv("ENABLE_DEBUG_MSG");
if (value != NULL) {
enable_debug_prints = atoi(value);
}
value = getenv("KERNEL_TIME");
if (value != NULL) {
kernel_time = atoi(value);
}
value = getenv("COMM_COMP_RATIO");
if (value != NULL) {
comm_comp_ratio = atoi(value);
}
size = 1;
value = getenv("SIZE");
if (value != NULL && atoi(value)) {
size = atoi(value);
}
value = getenv("MAX_SIZE");
if (value != NULL && atoi(value)) {
max_size = atoi(value);
}
int event_async = 0;
value = getenv("MP_EVENT_ASYNC");
if (value != NULL) {
event_async = atoi(value);
}
while(1) {
int c;
c = getopt(argc, argv, "d:W:");
if (c == -1)
break;
switch(c) {
case 'd':
gpu_id = strtol(optarg, NULL, 0);
break;
case 'W':
wait_key = strtol(optarg, NULL, 0);
break;
default:
printf("ERROR: invalid option\n");
exit(EXIT_FAILURE);
}
}
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if (comm_size != 2) {
fprintf(stderr, "this test requires exactly two processes \n");
exit(-1);
}
CUDA_CHECK(hipGetDeviceCount(&dev_count));
if (dev_count <= 0) {
fprintf(stderr, "no CUDA devices found \n");
exit(-1);
}
if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) {
local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK"));
} else if (getenv("OMPI_COMM_WORLD_LOCAL_RANK") != NULL) {
local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK"));
} else {
local_rank = 0;
}
if (gpu_id >= 0) {
dev_id = gpu_id;
} else if (getenv("USE_GPU")) {
dev_id = atoi(getenv("USE_GPU"));
} else {
dev_id = local_rank%dev_count;
}
if (dev_id >= dev_count) {
fprintf(stderr, "invalid dev_id\n");
exit(-1);
}
fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d\n", my_rank, local_rank, dev_count, dev_id);
fprintf(stdout, "[%d] validate=%d event_async=%d\n", my_rank, validate, event_async);
CUDA_CHECK(hipSetDevice(dev_id));
CUDA_CHECK(hipFree(0));
hipDeviceProp_t prop;
CUDA_CHECK(hipGetDeviceProperties(&prop, dev_id));
CUDA_CHECK(hipMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, hipMemcpyHostToDevice));
fprintf(stdout, "[%d] GPU name=%s\n", my_rank, prop.name);
peer = !my_rank;
MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id));
iter_count = ITER_COUNT_SMALL;
if (!my_rank) {
fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight);
fprintf(stdout, "WARNING: dumping half round-trip latency!!!\n");
}
prepost_depth = steps_per_batch*2;
sreq_max_inflight = steps_per_batch*batches_inflight;
rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth);
rreq_max_inflight += steps_per_batch*2;
//sreq_max_inflight += 32;
/*allocating requests*/
sreq = (mp_request_t *) malloc(sreq_max_inflight*sizeof(mp_request_t));
rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t));
CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
if (!my_rank) fprintf(stdout, "%10s\t %10s\n", "Size", "Async+Kernel");
for (; size<=max_size; size*=2)
{
double latency;
const char *tags = "kernel|send|recv|prepost|wait|";
if (size > 1024) {
iter_count = ITER_COUNT_LARGE;
}
if (!my_rank) {
if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) {
fprintf(stderr, "error in prof_init init.\n");
exit(-1);
}
prof_start = 1;
}
if (!my_rank) fprintf(stdout, "%10d", size);
/*warmup*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, NULL/*prof*/);
/*Async + Kernel*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, &prof_async);
if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency);
if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size);
}
CUDA_CHECK(hipStreamDestroy(stream));
free(sreq);
free(rreq);
mp_finalize ();
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
| d72b8242bc31fa44e0ca4c81356eb30de80bf556.cu | /****
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****/
#include "mpi.h"
#include "mp.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include <string.h>
#include <stdio.h>
#include "assert.h"
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include "prof.h"
#include <cuda_profiler_api.h>
#include <mp/device.cuh>
#include "nvToolsExt.h"
#define MPI_CHECK(stmt) \
do { \
int result = (stmt); \
if (MPI_SUCCESS != result) { \
char string[MPI_MAX_ERROR_STRING]; \
int resultlen = 0; \
MPI_Error_string(result, string, &resultlen); \
fprintf(stderr, " (%s:%d) MPI check failed with %d (%*s)\n", \
__FILE__, __LINE__, result, resultlen, string); \
exit(-1); \
} \
} while(0)
#define CUDA_CHECK(stmt) \
do { \
cudaError_t result = (stmt); \
if (cudaSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n", \
__FILE__, __LINE__,cudaGetErrorString(result));\
exit(-1); \
} \
assert(cudaSuccess == result); \
} while (0)
#define MP_CHECK(stmt) \
do { \
int result = (stmt); \
if (0 != result) { \
fprintf(stderr, "[%s:%d] mp call failed \n", \
__FILE__, __LINE__); \
exit(-1); \
} \
assert(0 == result); \
} while (0)
int enable_debug_prints = 0;
#define mp_dbg_msg(FMT, ARGS...) do \
{ \
if (enable_debug_prints) { \
fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \
fflush(stderr); \
} \
} while(0)
#define MAX_SIZE 4096 //128*1024
#define ITER_COUNT_SMALL (2*1024)
#define ITER_COUNT_LARGE 256
//-------------------------------- NVTX -----------------------------------------
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
//-------------------------------------------------------------------------------
struct prof prof_async;
int prof_start = 0;
int prof_idx = 0;
int comm_size, my_rank, peer;
int steps_per_batch = 16, batches_inflight = 4;
mp::mlx5::send_desc_t *tx;
mp::mlx5::send_desc_t *tx_d;
mp::mlx5::wait_desc_t *tx_wait;
mp::mlx5::wait_desc_t *tx_wait_d;
mp::mlx5::wait_desc_t *rx_wait;
mp::mlx5::wait_desc_t *rx_wait_d;
__device__ int counter;
__device__ int clockrate;
__device__ void dummy_kernel(double time)
{
long long int start, stop;
double usec;
start = clock64();
do {
stop = clock64();
usec = ((double)(stop-start)*1000)/((double)clockrate);
counter = usec;
} while(usec < time);
}
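/* exchange_kernel: GPU-side ping-pong using pre-built mp/mlx5 descriptors.
 * Rank 0 (thread 0 only): trigger send i, wait for its completion, then wait on
 * the matching receive; all threads then run dummy_kernel to emulate compute.
 * Rank 1 mirrors this: wait for the receive, compute, then send the reply.
 * __syncthreads()/__threadfence() order the compute with respect to the
 * communication; the kernel assumes a single block (gridDim.x == 1). */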
__global__ void exchange_kernel(int my_rank,
mp::mlx5::send_desc_t *tx_d,
mp::mlx5::wait_desc_t *tx_wait_d,
mp::mlx5::wait_desc_t *rx_wait_d,
int iter_number, double kernel_time)
{
int i;
assert(gridDim.x == 1);
/*
* dummy_kernel + threadfence: simulate some work after wait
*/
for (i=0; i<iter_number; ++i) {
if (!my_rank) {
if (0 == threadIdx.x) {
//Kernel send
mp::device::mlx5::send(tx_d[i]);
mp::device::mlx5::wait(tx_wait_d[i]);
mp::device::mlx5::signal(tx_wait_d[i]);
//Kernel receive
mp::device::mlx5::wait(rx_wait_d[i]);
mp::device::mlx5::signal(rx_wait_d[i]);
}
//Make sure the data has been received correctly before it is used
__syncthreads();
dummy_kernel(kernel_time);
__threadfence();
} else {
if (0 == threadIdx.x) {
//Kernel send
mp::device::mlx5::wait(rx_wait_d[i]);
mp::device::mlx5::signal(rx_wait_d[i]);
}
//Make sure the data has been received correctly before it is used
dummy_kernel(kernel_time);
__threadfence();
// make sure NIC can fetch coherent data
if (0 == threadIdx.x) {
mp::device::mlx5::send(tx_d[i]);
mp::device::mlx5::wait(tx_wait_d[i]);
mp::device::mlx5::signal(tx_wait_d[i]);
}
__syncthreads();
}
}
}
/*application and pack buffers*/
int sreq_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0;
size_t buf_size;
int gpu_id = -1;
int wait_key = 0;
double time_start, time_stop;
cudaStream_t stream;
/*mp specific objects*/
mp_request_t *sreq = NULL;
mp_request_t *rreq = NULL;
double sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, struct prof *prof)
{
int i, j, cycle_index, buf_index;
double latency;
double time_start, time_stop;
int sreq_idx = 0, rreq_idx = 0, complete_sreq_idx = 0, complete_rreq_idx = 0;
int sreq_inflight = 0, rreq_inflight = 0;
/*application and pack buffers*/
void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL;
mp_reg_t sreg, rreg;
CUDA_CHECK( cudaHostAlloc( (void**)&tx, rreq_max_inflight*sizeof(mp::mlx5::send_desc_t), cudaHostAllocMapped ) );
CUDA_CHECK( cudaHostGetDevicePointer ( &tx_d, tx, 0 ));
CUDA_CHECK( cudaHostAlloc( (void**)&tx_wait, rreq_max_inflight*sizeof(mp::mlx5::wait_desc_t), cudaHostAllocMapped ) );
CUDA_CHECK( cudaHostGetDevicePointer ( &tx_wait_d, tx_wait, 0 ));
CUDA_CHECK( cudaHostAlloc( (void**)&rx_wait, rreq_max_inflight*sizeof(mp::mlx5::wait_desc_t), cudaHostAllocMapped ) );
CUDA_CHECK( cudaHostGetDevicePointer ( &rx_wait_d, rx_wait, 0 ));
/*
size_t limitValue, freeValue, totalValue;
CUDA_CHECK(cudaDeviceGetLimit ( &limitValue, cudaLimitMallocHeapSize ));
CUDA_CHECK(cudaMemGetInfo( &freeValue, &totalValue));
//if(my_rank) fprintf(stdout, "*** GPU, limit: %zd, total: %zd, free: %zd\n", limitValue, totalValue, freeValue);
*/
CUDA_CHECK(cudaMalloc((void **)&sbuf_d, size*iter_count));
CUDA_CHECK(cudaMemset(sbuf_d, 0, size*iter_count));
CUDA_CHECK(cudaMalloc((void **)&rbuf_d, size*iter_count));
CUDA_CHECK(cudaMemset(rbuf_d, 0, size*iter_count));
MP_CHECK(mp_register(sbuf_d, size*iter_count, &sreg));
MP_CHECK(mp_register(rbuf_d, size*iter_count, &rreg));
if (validate) {
mp_dbg_msg("initializing the buffer \n");
CUDA_CHECK(cudaMemset(sbuf_d, (size + 1)%CHAR_MAX, size*iter_count));
CUDA_CHECK(cudaMemset(rbuf_d, 0, size*iter_count));
CUDA_CHECK(cudaDeviceSynchronize());
buf = (char *) calloc(size*iter_count, sizeof(char));
}
time_start = MPI_Wtime();
for (j=0; j<prepost_depth; j++) {
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + (size*j)), size, peer, &rreg, &rreq[rreq_idx]));
//Kernel
MP_CHECK(mp::mlx5::get_descriptors(&rx_wait[rreq_idx], &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
prof_idx = 0;
assert(!(iter_count%steps_per_batch));
for (j = 0; j < iter_count; j += steps_per_batch) {
mp_dbg_msg("[%d] iteration :%d \n", my_rank, j);
for(cycle_index=0; cycle_index < steps_per_batch; cycle_index++)
{
sreq_idx = (j+cycle_index)%sreq_max_inflight;
MP_CHECK(mp_send_prepare((void *)((uintptr_t)sbuf_d + ((j+cycle_index)*size)), size, peer, &sreg, &sreq[sreq_idx]));
MP_CHECK(mp::mlx5::get_descriptors(&tx[sreq_idx], &sreq[sreq_idx]));
MP_CHECK(mp::mlx5::get_descriptors(&tx_wait[sreq_idx], &sreq[sreq_idx]));
sreq_inflight++;
}
//It's the same for both Rank0 and Rank1
exchange_kernel<<<1,16,0,stream>>>(my_rank, tx_d+(j%sreq_max_inflight),
tx_wait_d+(j%sreq_max_inflight), rx_wait_d+(j%rreq_max_inflight),
steps_per_batch, kernel_time);
CUDA_CHECK(cudaGetLastError());
//if (prof && !my_rank) PROF(prof, prof_idx++);
mp_dbg_msg("[%d] posted send request: %d \n", my_rank, sreq_idx);
mp_dbg_msg("[%d] requests inflight: %d \n", my_rank, sreq_inflight);
//Post others recv
if ((j + prepost_depth) < iter_count) {
for(cycle_index=0; cycle_index < steps_per_batch; cycle_index++)
{
if(rreq_inflight >= rreq_max_inflight)
break;
mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx);
MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + ((j + prepost_depth + cycle_index) * size)), size, peer, &rreg, &rreq[rreq_idx]));
MP_CHECK(mp::mlx5::get_descriptors(&rx_wait[rreq_idx], &rreq[rreq_idx]));
rreq_idx = (rreq_idx + 1)%rreq_max_inflight;
rreq_inflight++;
}
}
/*synchronize on oldest batch*/
if (sreq_inflight == sreq_max_inflight)
{
mp_dbg_msg("[%d] after waiting on recv, rreq_inflight: %d \n", my_rank, rreq_inflight);
for (cycle_index=0; cycle_index<steps_per_batch; cycle_index++) {
mp_dbg_msg("[%d] waiting on send request: %d \n", my_rank, complete_sreq_idx);
MP_CHECK(mp_wait(&sreq[complete_sreq_idx]));
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_sreq_idx);
complete_sreq_idx = (complete_sreq_idx + 1)%sreq_max_inflight;
sreq_inflight--;
}
}
//The final number of wait will be always the same
if (rreq_inflight == rreq_max_inflight)
{
for (cycle_index=0; cycle_index<steps_per_batch; cycle_index++) {
mp_dbg_msg("[%d] waiting on recv request: %d \n", my_rank, complete_rreq_idx);
MP_CHECK(mp_wait(&rreq[complete_rreq_idx]));
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
}
if (j == (iter_count - steps_per_batch))
{
while (rreq_inflight > 0) {
MP_CHECK(mp_wait(&rreq[complete_rreq_idx]));
mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx);
complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight;
rreq_inflight--;
}
while (sreq_inflight > 0) {
MP_CHECK(mp_wait(&sreq[complete_sreq_idx]));
mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_sreq_idx);
complete_sreq_idx = (complete_sreq_idx + 1)%sreq_max_inflight;
sreq_inflight--;
}
}
/*
if (!my_rank && prof) {
PROF(prof, prof_idx++);
prof_update(prof);
prof_idx = 0;
}
*/
}
// TODO: move validate after timing
if (validate) {
CUDA_CHECK(cudaMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), size*iter_count, cudaMemcpyDefault));
//CUDA_CHECK(cudaDeviceSynchronize());
char *value = (char *)((uintptr_t)buf);
for (i=0; i<size*iter_count; i++) {
if (value[i] != (size + 1)%CHAR_MAX) {
mp_dbg_msg("[%d] validation check failed index: %d expected: %d actual: %d \n",
my_rank, i, (size + 1)%CHAR_MAX, value[i]);
exit(-1);
}
}
free(buf);
}
MPI_Barrier(comm);
CUDA_CHECK(cudaStreamSynchronize(stream));
time_stop = MPI_Wtime();
latency = (((time_stop - time_start)*1e6)/(iter_count*2));
CUDA_CHECK(cudaDeviceSynchronize());
mp_deregister(&sreg);
mp_deregister(&rreg);
CUDA_CHECK(cudaFree(sbuf_d));
CUDA_CHECK(cudaFree(rbuf_d));
return latency;
}
int main (int argc, char *argv[])
{
int iter_count = 0, size = 0, dev_count = 0, local_rank = 0, dev_id = 0;
int kernel_time = 20;
int comm_comp_ratio = 0;
int validate = 0;
int max_size = MAX_SIZE;
char *value = getenv("ENABLE_VALIDATION");
if (value != NULL) {
validate = atoi(value);
}
value = getenv("ENABLE_DEBUG_MSG");
if (value != NULL) {
enable_debug_prints = atoi(value);
}
value = getenv("KERNEL_TIME");
if (value != NULL) {
kernel_time = atoi(value);
}
value = getenv("COMM_COMP_RATIO");
if (value != NULL) {
comm_comp_ratio = atoi(value);
}
size = 1;
value = getenv("SIZE");
if (value != NULL && atoi(value)) {
size = atoi(value);
}
value = getenv("MAX_SIZE");
if (value != NULL && atoi(value)) {
max_size = atoi(value);
}
int event_async = 0;
value = getenv("MP_EVENT_ASYNC");
if (value != NULL) {
event_async = atoi(value);
}
while(1) {
int c;
c = getopt(argc, argv, "d:W:");
if (c == -1)
break;
switch(c) {
case 'd':
gpu_id = strtol(optarg, NULL, 0);
break;
case 'W':
wait_key = strtol(optarg, NULL, 0);
break;
default:
printf("ERROR: invalid option\n");
exit(EXIT_FAILURE);
}
}
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if (comm_size != 2) {
fprintf(stderr, "this test requires exactly two processes \n");
exit(-1);
}
CUDA_CHECK(cudaGetDeviceCount(&dev_count));
if (dev_count <= 0) {
fprintf(stderr, "no CUDA devices found \n");
exit(-1);
}
if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) {
local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK"));
} else if (getenv("OMPI_COMM_WORLD_LOCAL_RANK") != NULL) {
local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK"));
} else {
local_rank = 0;
}
if (gpu_id >= 0) {
dev_id = gpu_id;
} else if (getenv("USE_GPU")) {
dev_id = atoi(getenv("USE_GPU"));
} else {
dev_id = local_rank%dev_count;
}
if (dev_id >= dev_count) {
fprintf(stderr, "invalid dev_id\n");
exit(-1);
}
fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d\n", my_rank, local_rank, dev_count, dev_id);
fprintf(stdout, "[%d] validate=%d event_async=%d\n", my_rank, validate, event_async);
CUDA_CHECK(cudaSetDevice(dev_id));
CUDA_CHECK(cudaFree(0));
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, dev_id));
CUDA_CHECK(cudaMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, cudaMemcpyHostToDevice));
fprintf(stdout, "[%d] GPU name=%s\n", my_rank, prop.name);
peer = !my_rank;
MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id));
iter_count = ITER_COUNT_SMALL;
if (!my_rank) {
fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight);
fprintf(stdout, "WARNING: dumping half round-trip latency!!!\n");
}
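// Size the request windows: prepost_depth receives are posted ahead of time,
// sends are bounded by steps_per_batch*batches_inflight, and the receive window
// gets extra slack so preposting never outruns the completed receives.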
prepost_depth = steps_per_batch*2;
sreq_max_inflight = steps_per_batch*batches_inflight;
rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth);
rreq_max_inflight += steps_per_batch*2;
//sreq_max_inflight += 32;
/*allocating requests*/
sreq = (mp_request_t *) malloc(sreq_max_inflight*sizeof(mp_request_t));
rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t));
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
if (!my_rank) fprintf(stdout, "%10s\t %10s\n", "Size", "Async+Kernel");
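// Sweep message sizes in powers of two up to max_size; messages above 1024
// bytes switch to the larger iteration count.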
for (; size<=max_size; size*=2)
{
double latency;
const char *tags = "kernel|send|recv|prepost|wait|";
if (size > 1024) {
iter_count = ITER_COUNT_LARGE;
}
if (!my_rank) {
if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) {
fprintf(stderr, "error in prof_init init.\n");
exit(-1);
}
prof_start = 1;
}
if (!my_rank) fprintf(stdout, "%10d", size);
/*warmup*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, NULL/*prof*/);
/*Async + Kernel*/
latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, &prof_async);
if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency);
if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size);
}
CUDA_CHECK(cudaStreamDestroy(stream));
free(sreq);
free(rreq);
mp_finalize ();
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
b6436428d35e0dcbb12912af6b09654f1dfd98a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T>
__global__ void
awkward_NumpyArray_contiguous_init(T* toptr,
int64_t skip,
int64_t stride,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < skip) {
toptr[thread_id] = (thread_id * stride);
}
}
}
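// A minimal launch sketch (not part of the original kernel; the grid sizing and
// the int64_t instantiation are assumptions, and err_code must point to device
// memory initialized with the library's NO_ERROR sentinel):
//   int64_t threads = 128;
//   int64_t blocks = (skip + threads - 1) / threads;
//   awkward_NumpyArray_contiguous_init<int64_t><<<blocks, threads>>>(
//       toptr, skip, stride, /*invocation_index=*/0, err_code);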
| b6436428d35e0dcbb12912af6b09654f1dfd98a0.cu | // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T>
__global__ void
awkward_NumpyArray_contiguous_init(T* toptr,
int64_t skip,
int64_t stride,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < skip) {
toptr[thread_id] = (thread_id * stride);
}
}
}
|
597a30fedaeceebaf25a86915bf74af93797186b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "Test31"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 64
#define YDIM 64
#define ZDIM 10
#define TMAX 5000
#define STARTF 0
#define OBSTR1 4.f
#define OBSTX1 31.5f
#define OBSTY1 31.5f
#define OBSTZ1 15.5f
#define OBSTR2 4.f
#define OBSTX2 63.5f
#define OBSTY2 31.5f
#define OBSTZ2 31.5f
#define RE 100.f//2000.f//100.f;
#define UMAX 0.08f
#define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE
#define SmagLES "NO" //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
#define ZPERIODIC "NO"
#define CS 0.04f
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
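// Note: the list above is partially stale for this file. ImageFcn below returns
// 0 (fluid), 10 (bounce-back with force), 25/26 (x-symmetry) and 100/200 for the
// y-extents, while boundaries()/boundaries_force() further down dispatch on
// 53/54 (regularized west velocity inlet and east pressure outlet) and on the
// symmetry codes 21-26.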
inline __device__ int ImageFcn(float x, float y, float z){
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
return 10;
else
return 0;
}
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
// if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// else if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
// else if(x == 0)
// value = 26;
// else if(x == XDIM-1)
// value = 25;
// else if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
//return value;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
else if(x == 0)
value = 26;
else if(x == XDIM-1)
value = 25;
// else if(x == 0)
// return 53;
// else if(x == XDIM-1)
// return 54;
return value;
}
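// Parabolic (Poiseuille) profile: 0 at the near-wall node (x = 0.5) and 1 at the
// channel centerline; the inlet routines below reference it (currently commented
// out) to shape UMAX.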
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
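// West-face velocity inlet (u = UMAX, v = w = 0): edge and corner nodes first
// mirror the populations that have no upstream neighbor, then the five unknown
// x-positive populations (f1, f5, f8, f10, f15) are reconstructed from their
// opposites plus the prescribed momentum.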
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
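// Regularized version of the west velocity inlet: after mirroring the edge and
// corner populations, all nineteen distributions are rebuilt from the
// prescribed-velocity equilibrium plus the non-equilibrium stress tensor
// (PI11..PI23) projected back onto the lattice directions.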
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
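// Regularized pressure outlet on the east face: density is pinned to 1, the
// normal velocity is recovered from the known populations, and the
// distributions are rebuilt from the equilibrium plus the reconstructed
// non-equilibrium stress tensor.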
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
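// Dispatch the boundary treatment for a node based on its image code (im):
// 53/54 select the regularized west inlet and east outlet, 21-24 the y/z
// symmetry planes; boundaries_force() below additionally handles the
// x-symmetry codes 25/26.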
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
//DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//NeumannEast
{
//NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
}
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//NeumannEast
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
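// North-face treatment: velocity moments are taken from the incoming
// populations, the density is pinned to 1, and all distributions are rebuilt
// from the D3Q19 MRT moment basis (m1..m18).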
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
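// South-face treatment: the tangential velocities are set to zero, density is
// taken from the local populations, and the distributions are rebuilt from the
// moment basis with the prescribed normal velocity v.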
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
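/* Subtract timeval y from x, storing the difference in seconds (as a double)
   in *result; returns 1 if the difference is negative. */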
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
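// Single-relaxation-time (BGK) collision for D3Q19: each population relaxes
// toward the second-order equilibrium
//   feq_i = w_i*(rho + 3 e_i.u + 4.5 (e_i.u)^2 - 1.5 |u|^2),  w_i in {1/3, 1/18, 1/36},
// at rate omega (the commented block is the equivalent (1-omega)*f + omega*feq form).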
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
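// mrt_collide: multiple-relaxation-time (MRT) collision for D3Q19.
// m1..m18 below are non-equilibrium moments (each moment minus its equilibrium).
// The f0..f18 updates apply the inverse moment transform directly, with no
// intermediate moment array: the non-stress moments appear to be relaxed at
// unit rate (their coefficients are the plain inverse-transform weights),
// while omega multiplies the shear-stress moments (m9, m11, m13, m14, m15).
// When SmagLES is enabled, omega is first adjusted from the non-equilibrium
// stress magnitude (Smagorinsky model).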
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
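// mrt_collide_LES: same MRT collision as above, with a Smagorinsky-type
// eddy-viscosity correction. The non-equilibrium stress tensor PI is built
// from (f_i - feq_i), its magnitude Q augments the relaxation time via
//   tau = tau0 + 0.5*(-tau0 + sqrt(tau0*tau0 + 18*sqrt(2)*CS*Q)),
// and omega = 1/tau is then applied to the stress moments. The extra
// parameter Cs is only referenced by the commented-out alternatives; the
// active path uses the compile-time constant CS.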
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
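// bgk_scale_cf: rescale distributions when transferring nodes between grid
// levels with the BGK model. Each f_i is split into equilibrium and
// non-equilibrium parts and the non-equilibrium part is scaled by SF:
//   f_i = SF*f_i + (1-SF)*feq_i.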
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
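// mrt_scale_cf: coarse-to-fine rescaling of the distributions for the MRT
// model. Nominal coarse and fine relaxation rates omega and omega2 are formed
// from the global flow parameters, and the non-equilibrium part of each f_i
// is scaled by SF = omega*(1-omega2)/((1-omega)*omega2) (post-collision form).
// A Smagorinsky strain estimate (Smag) is computed from the moments, but the
// turbulence corrections to omega/omega2 are currently commented out.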
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
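// mrt_scale_fc_LES: fine-to-coarse rescaling of the distributions. The
// relaxation rates omega and omega2 (coarse and fine, judging by the
// commented code) are supplied by the caller, and the non-equilibrium part
// of each f_i is scaled by the same factor
//   SF = omega*(1-omega2)/((1-omega)*omega2)
// as in the coarse-to-fine direction; the LES-based adjustments remain only
// as commented-out alternatives.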
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
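// f_mem / buff_mem: index helpers for the pitched distribution arrays.
// f_mem places component f_num at a stride of pitch*YDIM*(ZDIM/2-2) elements
// (ZDIM/2-2 z-slices per component); buff_mem addresses node (x,y) in a
// one-slice buffer with a stride of pitch*YDIM per component. Illustrative
// example: with pitch P, f5 at (x,y,z) maps to
//   x + y*P + z*YDIM*P + 5*P*YDIM*(ZDIM/2-2).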
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch)
{
return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(ZDIM/2-2);
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
return (x+y*pitch)+f_num*pitch*YDIM;
}
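// Index helpers for neighbor lookups at the domain edges: dmin/dmax clamp an
// index into [0, b-1], while dmin_p/dmax_p wrap it around periodically.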
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
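// The block below (mrt_d_single_force) is an earlier kernel variant that also
// accumulated the force on the obstacle via a shared-memory reduction and
// atomicAdd; it is kept here commented out, presumably for reference.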
//__global__ void mrt_d_single_force(float* fin, float* fout,
// float omega, size_t pitch, float *FX, float *FY, float *FZ,
// int t,float *uAv, float *vAv, float *ufluc, float *vfluc, int GPU)
//{
// int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
// int y = threadIdx.y+blockIdx.y*blockDim.y;//;
// int z = threadIdx.z+blockIdx.z*blockDim.z+ZDIM/2*GPU;
// int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
// __shared__ int check[1];
// check[0] = 0;
// syncthreads();
// float u_Av, v_Av, u_fluc, v_fluc;
//
//
//// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
//// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
//// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
//// (x>XDIM-1)))
//// {
//// }
//// else{
// int im = ImageFcn(x,y,z);
//
// f0 = fin[j];
// f1 = fin[f_mem(1 ,x-1,y ,z ,pitch)];
// f3 = fin[f_mem(3 ,x+1,y ,z ,pitch)];
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
// f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch)];
// f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
// f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch)];
// f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch)];
// if(z != 0){
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
// f10= fin[f_mem(10,x-1,y ,z-1,pitch)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
// f12= fin[f_mem(12,x+1,y ,z-1,pitch)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
// }
// else{
// f9 = fin[f_mem(9 ,x ,y ,ZDIM-1,pitch)];
// f10= fin[f_mem(10,dmax_p(x-1,XDIM),y ,ZDIM-1,pitch)];
// f11= fin[f_mem(11,x ,dmax_p(y-1,YDIM),ZDIM-1,pitch)];
// f12= fin[f_mem(12,dmin_p(x+1,XDIM),y ,ZDIM-1,pitch)];
// f13= fin[f_mem(13,x ,dmin_p(y+1,YDIM),ZDIM-1,pitch)];
// }
// if(z != ZDIM-1){
// f14= fin[f_mem(14,x ,y ,z+1,pitch)];
// f15= fin[f_mem(15,x-1,y ,z+1,pitch)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
// f17= fin[f_mem(17,x+1,y ,z+1,pitch)];
// f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
// }
// else{
// f14= fin[f_mem(14,x ,y ,0 ,pitch)];
// f15= fin[f_mem(15,dmax_p(x-1,XDIM),y ,0 ,pitch)];
// f16= fin[f_mem(16,x ,dmax_p(y-1,YDIM),0 ,pitch)];
// f17= fin[f_mem(17,dmin_p(x+1,XDIM),y ,0 ,pitch)];
// f18= fin[f_mem(18,x ,dmin_p(y+1,YDIM),0 ,pitch)];
// }
//
// if(im == 1 || im == 10){//BB
// if(im == 10){
// check[0] = 1;
// sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
// sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
// sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
// fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
// fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
// fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
// fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
// fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
// fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
// fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
// fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
// fout[j+pitch*YDIM*ZDIM*9 ] = f14;
// fout[j+pitch*YDIM*ZDIM*10] = f17;
// fout[j+pitch*YDIM*ZDIM*11] = f18;
// fout[j+pitch*YDIM*ZDIM*12] = f15;
// fout[j+pitch*YDIM*ZDIM*13] = f16;
// fout[j+pitch*YDIM*ZDIM*14] = f9 ;
// fout[j+pitch*YDIM*ZDIM*15] = f12;
// fout[j+pitch*YDIM*ZDIM*16] = f13;
// fout[j+pitch*YDIM*ZDIM*17] = f10;
// fout[j+pitch*YDIM*ZDIM*18] = f11;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// boundaries_force(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
//
// if(im == 100)//north outlet
// {
// f0 = fin[f_mem(0 ,x,y-1,z,pitch)];
// f1 = fin[f_mem(1 ,x,y-1,z,pitch)];
// f3 = fin[f_mem(3 ,x,y-1,z,pitch)];
// f2 = fin[f_mem(2 ,x,y-1,z,pitch)];
// f5 = fin[f_mem(5 ,x,y-1,z,pitch)];
// f6 = fin[f_mem(6 ,x,y-1,z,pitch)];
// f4 = fin[f_mem(4 ,x,y-1,z,pitch)];
// f7 = fin[f_mem(7 ,x,y-1,z,pitch)];
// f8 = fin[f_mem(8 ,x,y-1,z,pitch)];
// f9 = fin[f_mem(9 ,x,y-1,z,pitch)];
// f10= fin[f_mem(10,x,y-1,z,pitch)];
// f11= fin[f_mem(11,x,y-1,z,pitch)];
// f12= fin[f_mem(12,x,y-1,z,pitch)];
// f13= fin[f_mem(13,x,y-1,z,pitch)];
// f14= fin[f_mem(14,x,y-1,z,pitch)];
// f15= fin[f_mem(15,x,y-1,z,pitch)];
// f16= fin[f_mem(16,x,y-1,z,pitch)];
// f17= fin[f_mem(17,x,y-1,z,pitch)];
// f18= fin[f_mem(18,x,y-1,z,pitch)];
//
// float rho,u,v,w;
// rho = 1.0f;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
// m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
// m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
// m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
// m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
// m13 = f5+ - f6+ f7+ - f8 ;
// m14 = f11 + - f13 + - f16 + f18;
// m15 = f10 + - f12 + - f15 + f17 ;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
//f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
//f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
//f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//
// }
// if(im == 200)//south inlet
// {
// f0 = fin[f_mem(0 ,x,y+1,z,pitch)];
// f1 = fin[f_mem(1 ,x,y+1,z,pitch)];
// f3 = fin[f_mem(3 ,x,y+1,z,pitch)];
// f2 = fin[f_mem(2 ,x,y+1,z,pitch)];
// f5 = fin[f_mem(5 ,x,y+1,z,pitch)];
// f6 = fin[f_mem(6 ,x,y+1,z,pitch)];
// f4 = fin[f_mem(4 ,x,y+1,z,pitch)];
// f7 = fin[f_mem(7 ,x,y+1,z,pitch)];
// f8 = fin[f_mem(8 ,x,y+1,z,pitch)];
// f9 = fin[f_mem(9 ,x,y+1,z,pitch)];
// f10= fin[f_mem(10,x,y+1,z,pitch)];
// f11= fin[f_mem(11,x,y+1,z,pitch)];
// f12= fin[f_mem(12,x,y+1,z,pitch)];
// f13= fin[f_mem(13,x,y+1,z,pitch)];
// f14= fin[f_mem(14,x,y+1,z,pitch)];
// f15= fin[f_mem(15,x,y+1,z,pitch)];
// f16= fin[f_mem(16,x,y+1,z,pitch)];
// f17= fin[f_mem(17,x,y+1,z,pitch)];
// f18= fin[f_mem(18,x,y+1,z,pitch)];
//
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = UMAX;//f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
// m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
// m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
// m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
// m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
// m13 = f5+ - f6+ f7+ - f8 ;
// m14 = f11 + - f13 + - f16 + f18;
// m15 = f10 + - f12 + - f15 + f17 ;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
//f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
//f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
//f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//
// }
//
//
// //float Cs = 0.01f;
// //if(XDIM-x < 64.f)
// ////Cs = 0.01f+(x-64.f)/64.f*(x-64.f)/64.f*0.1f;
// //Cs = 0.01f*pow(2.f,((x-448.f)/16.f));
//
// if(MODEL == "MRT")
// mrt_collide_LES(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega,CS);
// else if(MODEL == "BGK")
// bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//
// if(VELAV == "YES"){
// if(t>=START_VELAV && t<START_VELFLUC){
// u_Av = uAv[j];
// v_Av = vAv[j];
// vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
// uAv[j] = u_Av;
// vAv[j] = v_Av;
// }
// else if(t>=START_VELFLUC){
// u_Av = uAv[j];
// v_Av = vAv[j];
// u_fluc = ufluc[j];
// v_fluc = vfluc[j];
// vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
// ufluc[j] = u_fluc;
// vfluc[j] = v_fluc;
//
// }
// }
//
//
//
// fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
// fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
// fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
// fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
// fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
// fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
// fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
// fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
// fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
// fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
// fout[f_mem(10,x,y,z,pitch)] = f10;
// fout[f_mem(11,x,y,z,pitch)] = f11;
// fout[f_mem(12,x,y,z,pitch)] = f12;
// fout[f_mem(13,x,y,z,pitch)] = f13;
// fout[f_mem(14,x,y,z,pitch)] = f14;
// fout[f_mem(15,x,y,z,pitch)] = f15;
// fout[f_mem(16,x,y,z,pitch)] = f16;
// fout[f_mem(17,x,y,z,pitch)] = f17;
// fout[f_mem(18,x,y,z,pitch)] = f18;
// }
//
// syncthreads();
// if(check[0] == 1 && t>=STARTF && REFINEMENT == "NO"){
// //reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
// sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
// sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
// if(threadIdx.x == 0){
// atomicAdd(&FX[t],sumX[0]);
// atomicAdd(&FY[t],sumY[0]);
// atomicAdd(&FZ[t],sumZ[0]);
// }
// }
//
//
//// }
//}
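// mrt_d_single: one thread per lattice node of this GPU's half-domain.
// Distributions are pulled from the neighbors in fA (pull streaming), solid
// nodes (im==1 or im==10) get half-way bounce-back, and fluid nodes apply the
// boundary conditions and the selected collision model before writing to fB.
// A minimal host-side launch sketch (block-size macros other than BLOCKSIZEX
// and the variable names are assumptions, not taken from this file):
//   dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//   dim3 grid(XDIM/BLOCKSIZEX, YDIM/BLOCKSIZEY, (ZDIM/2)/BLOCKSIZEZ);
//   mrt_d_single<<<grid, threads>>>(fA_d, fB_d, omega, pitch_elems, gpu_id);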
__global__ void mrt_d_single(float* fA, float* fB,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z+ZDIM/2*GPU;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)];
f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)];
f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)];
f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)];
f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)];
f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)];
f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)];
f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)];
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch)];
f14= fA[f_mem(14,x ,y ,z+1,pitch)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch)];
//f18= fA[f_mem(18,x ,y+1,dmin(z+1,ZDIM),pitch)];
if(z != ZDIM-1)
f18= fA[f_mem(18,x ,y+1,z+1,pitch)];
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch)] = f14;
fB[f_mem(10,x,y,z,pitch)] = f17;
fB[f_mem(11,x,y,z,pitch)] = f18;
fB[f_mem(12,x,y,z,pitch)] = f15;
fB[f_mem(13,x,y,z,pitch)] = f16;
fB[f_mem(14,x,y,z,pitch)] = f9 ;
fB[f_mem(15,x,y,z,pitch)] = f12;
fB[f_mem(16,x,y,z,pitch)] = f13;
fB[f_mem(17,x,y,z,pitch)] = f10;
fB[f_mem(18,x,y,z,pitch)] = f11;
}
else{
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
fB[f_mem(10,x,y,z,pitch)] = f10;
fB[f_mem(11,x,y,z,pitch)] = f11;
fB[f_mem(12,x,y,z,pitch)] = f12;
fB[f_mem(13,x,y,z,pitch)] = f13;
fB[f_mem(14,x,y,z,pitch)] = f14;
fB[f_mem(15,x,y,z,pitch)] = f15;
fB[f_mem(16,x,y,z,pitch)] = f16;
fB[f_mem(17,x,y,z,pitch)] = f17;
fB[f_mem(18,x,y,z,pitch)] = f18;
}
// }
}
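//Interior slices of one GPU's sub-domain. Most nodes stream entirely from fA, but the lowest
//interior slice (local z == 0) pulls its +z movers (f9-f13) from the bottom buffer g, and the
//highest one (local z == ZDIM/2-1-1) pulls its -z movers (f14-f18) from the top buffer h.
//Nodes flagged 100/200 are refilled from the neighboring row and run through
//North_Extrap/South_Extrap (outlet/inlet) before collision.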
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*ZDIM/2+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
if(z==ZDIM/2-1-1){//top nodes need info from h
f0 = fA[j];
f1 = fA[f_mem (1 ,dmax(x-1) ,y ,z ,pitch)];
f3 = fA[f_mem (3 ,dmin(x+1,XDIM),y ,z ,pitch)];
f2 = fA[f_mem (2 ,x ,dmax(y-1) ,z ,pitch)];
f5 = fA[f_mem (5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
f6 = fA[f_mem (6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
f4 = fA[f_mem (4 ,x ,dmin(y+1,YDIM),z ,pitch)];
f7 = fA[f_mem (7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
f8 = fA[f_mem (8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch)];
f10= fA[f_mem (10,dmax(x-1) ,y ,z-1,pitch)];
f11= fA[f_mem (11,x ,dmax(y-1) ,z-1,pitch)];
f12= fA[f_mem (12,dmin(x+1,XDIM),y ,z-1,pitch)];
f13= fA[f_mem (13,x ,dmin(y+1,YDIM),z-1,pitch)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,dmax(x-1) ,y ,pitch)];
f16= h [buff_mem(16,x ,dmax(y-1) ,pitch)];
f17= h [buff_mem(17,dmin(x+1,XDIM),y ,pitch)];
f18= h [buff_mem(18,x ,dmin(y+1,YDIM),pitch)];
}
else if(z==0){//bottom nodes need info from g
f0 = fA[j];
f1 = fA[f_mem (1 ,dmax(x-1) ,y ,z ,pitch)];
f3 = fA[f_mem (3 ,dmin(x+1,XDIM),y ,z ,pitch)];
f2 = fA[f_mem (2 ,x ,dmax(y-1) ,z ,pitch)];
f5 = fA[f_mem (5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
f6 = fA[f_mem (6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
f4 = fA[f_mem (4 ,x ,dmin(y+1,YDIM),z ,pitch)];
f7 = fA[f_mem (7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
f8 = fA[f_mem (8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,dmax(x-1) ,y ,pitch)];
f11= g [buff_mem(11,x ,dmax(y-1) ,pitch)];
f12= g [buff_mem(12,dmin(x+1,XDIM),y ,pitch)];
f13= g [buff_mem(13,x ,dmin(y+1,YDIM),pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch)];
f15= fA[f_mem (15,dmax(x-1) ,y ,z+1,pitch)];
f16= fA[f_mem (16,x ,dmax(y-1) ,z+1,pitch)];
f17= fA[f_mem (17,dmin(x+1,XDIM),y ,z+1,pitch)];
f18= fA[f_mem (18,x ,dmin(y+1,YDIM),z+1,pitch)];
}
else{//normal nodes
f0 = fA[j];
f1 = fA[f_mem(1 ,dmax(x-1) ,y ,z,pitch)];
f3 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z,pitch)];
f2 = fA[f_mem(2 ,x ,dmax(y-1) ,z,pitch)];
f5 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z,pitch)];
f6 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z,pitch)];
f4 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z,pitch)];
f7 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z,pitch)];
f8 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z,pitch)];
f9 = fA[f_mem(9 ,x ,y ,z,pitch)];
f10= fA[f_mem(10,dmax(x-1) ,y ,z,pitch)];
f11= fA[f_mem(11,x ,dmax(y-1) ,z,pitch)];
f12= fA[f_mem(12,dmin(x+1,XDIM),y ,z,pitch)];
f13= fA[f_mem(13,x ,dmin(y+1,YDIM),z,pitch)];
f14= fA[f_mem(14,x ,y ,z,pitch)];
f15= fA[f_mem(15,dmax(x-1) ,y ,z,pitch)];
f16= fA[f_mem(16,x ,dmax(y-1) ,z,pitch)];
f17= fA[f_mem(17,dmin(x+1,XDIM),y ,z,pitch)];
f18= fA[f_mem(18,x ,dmin(y+1,YDIM),z,pitch)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch)] = f14;
fB[f_mem(10,x,y,z,pitch)] = f17;
fB[f_mem(11,x,y,z,pitch)] = f18;
fB[f_mem(12,x,y,z,pitch)] = f15;
fB[f_mem(13,x,y,z,pitch)] = f16;
fB[f_mem(14,x,y,z,pitch)] = f9 ;
fB[f_mem(15,x,y,z,pitch)] = f12;
fB[f_mem(16,x,y,z,pitch)] = f13;
fB[f_mem(17,x,y,z,pitch)] = f10;
fB[f_mem(18,x,y,z,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch)];
f10= fA[f_mem(10,x,y-1,z,pitch)];
f11= fA[f_mem(11,x,y-1,z,pitch)];
f12= fA[f_mem(12,x,y-1,z,pitch)];
f13= fA[f_mem(13,x,y-1,z,pitch)];
f14= fA[f_mem(14,x,y-1,z,pitch)];
f15= fA[f_mem(15,x,y-1,z,pitch)];
f16= fA[f_mem(16,x,y-1,z,pitch)];
f17= fA[f_mem(17,x,y-1,z,pitch)];
f18= fA[f_mem(18,x,y-1,z,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch)];
f10= fA[f_mem(10,x,y+1,z,pitch)];
f11= fA[f_mem(11,x,y+1,z,pitch)];
f12= fA[f_mem(12,x,y+1,z,pitch)];
f13= fA[f_mem(13,x,y+1,z,pitch)];
f14= fA[f_mem(14,x,y+1,z,pitch)];
f15= fA[f_mem(15,x,y+1,z,pitch)];
f16= fA[f_mem(16,x,y+1,z,pitch)];
f17= fA[f_mem(17,x,y+1,z,pitch)];
f18= fA[f_mem(18,x,y+1,z,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
			boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*ZDIM/2+1+z,im);//use the node's global z, matching the ImageFcn call above
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
fB[f_mem(10,x,y,z,pitch)] = f10;
fB[f_mem(11,x,y,z,pitch)] = f11;
fB[f_mem(12,x,y,z,pitch)] = f12;
fB[f_mem(13,x,y,z,pitch)] = f13;
fB[f_mem(14,x,y,z,pitch)] = f14;
fB[f_mem(15,x,y,z,pitch)] = f15;
fB[f_mem(16,x,y,z,pitch)] = f16;
fB[f_mem(17,x,y,z,pitch)] = f17;
fB[f_mem(18,x,y,z,pitch)] = f18;
}
// }
}
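//Bottom halo slice (global z = GPU*ZDIM/2): in-plane distributions stream from gA, the +z movers
//(f9-f13) come from the temp buffer received from the peer GPU, and the -z movers (f14-f18) come
//from interior slice 0 of f; the result is written to gB.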
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*ZDIM/2);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,dmax(x-1) ,y ,pitch)];
f3 = gA [buff_mem(3 ,dmin(x+1,XDIM),y ,pitch)];
f2 = gA [buff_mem(2 ,x ,dmax(y-1) ,pitch)];
f5 = gA [buff_mem(5 ,dmax(x-1) ,dmax(y-1) ,pitch)];
f6 = gA [buff_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,pitch)];
f4 = gA [buff_mem(4 ,x ,dmin(y+1,YDIM),pitch)];
f7 = gA [buff_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),pitch)];
f8 = gA [buff_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,dmax(x-1) ,y ,pitch)];
f11= temp[buff_mem(11,x ,dmax(y-1) ,pitch)];
f12= temp[buff_mem(12,dmin(x+1,XDIM),y ,pitch)];
f13= temp[buff_mem(13,x ,dmin(y+1,YDIM),pitch)];
f14= f [f_mem (14,x ,y ,0,pitch)];
f15= f [f_mem (15,dmax(x-1) ,y ,0,pitch)];
f16= f [f_mem (16,x ,dmax(y-1) ,0,pitch)];
f17= f [f_mem (17,dmin(x+1,XDIM),y ,0,pitch)];
f18= f [f_mem (18,x ,dmin(y+1,YDIM),0,pitch)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*ZDIM/2,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
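//Top halo slice (global z = (GPU+1)*ZDIM/2-1): in-plane distributions stream from hA, the +z
//movers (f9-f13) come from the highest interior slice of f (index ZDIM/2-2-1), and the -z movers
//(f14-f18) come from the temp buffer received from the peer GPU; the result is written to hB.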
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*ZDIM/2-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,dmax(x-1) ,y ,pitch)];
f3 = hA [buff_mem(3 ,dmin(x+1,XDIM),y ,pitch)];
f2 = hA [buff_mem(2 ,x ,dmax(y-1) ,pitch)];
f5 = hA [buff_mem(5 ,dmax(x-1) ,dmax(y-1) ,pitch)];
f6 = hA [buff_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,pitch)];
f4 = hA [buff_mem(4 ,x ,dmin(y+1,YDIM),pitch)];
f7 = hA [buff_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),pitch)];
f8 = hA [buff_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),pitch)];
f9 = f [f_mem (9 ,x ,y ,ZDIM/2-2-1,pitch)];
f10= f [f_mem (10,dmax(x-1) ,y ,ZDIM/2-2-1,pitch)];
f11= f [f_mem (11,x ,dmax(y-1) ,ZDIM/2-2-1,pitch)];
f12= f [f_mem (12,dmin(x+1,XDIM),y ,ZDIM/2-2-1,pitch)];
f13= f [f_mem (13,x ,dmin(y+1,YDIM),ZDIM/2-2-1,pitch)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,dmax(x-1) ,y ,pitch)];
f16= temp[buff_mem(16,x ,dmax(y-1) ,pitch)];
f17= temp[buff_mem(17,dmin(x+1,XDIM),y ,pitch)];
f18= temp[buff_mem(18,x ,dmin(y+1,YDIM),pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
		boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*ZDIM/2-1,im);//use this slice's global z, matching the ImageFcn call above
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
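//Cached global load via inline PTX: ld.global.cg caches at the L2 level only. The inline assembly
//is NVIDIA-specific, and the helper appears to be unused in this translation unit.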
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
__global__ void initialize_single(float *f, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
// if(im == 10 || im == 1){
// u = 0.0f;
// v = 0.0f;
// w = 0.0f;
// }
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
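	//note: the BGK branch below uses a ZDIM-based plane stride, whereas the MRT branch and the
	//device allocations in main use zInner = ZDIM/2-2 interior slices; with MODEL == "MRT" the BGK
	//branch is never taken, but it would overrun f if BGK were selected.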
if(MODEL == "BGK"){
f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f[j+0 *pitch*YDIM*(ZDIM/2-2)]=f0 ;
f[j+1 *pitch*YDIM*(ZDIM/2-2)]=f1 ;
f[j+2 *pitch*YDIM*(ZDIM/2-2)]=f2 ;
f[j+3 *pitch*YDIM*(ZDIM/2-2)]=f3 ;
f[j+4 *pitch*YDIM*(ZDIM/2-2)]=f4 ;
f[j+5 *pitch*YDIM*(ZDIM/2-2)]=f5 ;
f[j+6 *pitch*YDIM*(ZDIM/2-2)]=f6 ;
f[j+7 *pitch*YDIM*(ZDIM/2-2)]=f7 ;
f[j+8 *pitch*YDIM*(ZDIM/2-2)]=f8 ;
f[j+9 *pitch*YDIM*(ZDIM/2-2)]=f9 ;
f[j+10*pitch*YDIM*(ZDIM/2-2)]=f10;
f[j+11*pitch*YDIM*(ZDIM/2-2)]=f11;
f[j+12*pitch*YDIM*(ZDIM/2-2)]=f12;
f[j+13*pitch*YDIM*(ZDIM/2-2)]=f13;
f[j+14*pitch*YDIM*(ZDIM/2-2)]=f14;
f[j+15*pitch*YDIM*(ZDIM/2-2)]=f15;
f[j+16*pitch*YDIM*(ZDIM/2-2)]=f16;
f[j+17*pitch*YDIM*(ZDIM/2-2)]=f17;
f[j+18*pitch*YDIM*(ZDIM/2-2)]=f18;
}
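	//zero the padded columns beyond XDIM (a no-op when pitch == XDIM, as with the power-of-two
	//round-up and XDIM = 64 in main); note this loop also uses the ZDIM-based plane stride.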
if(x == XDIM-1){
for(int i = XDIM; i<pitch; i++){
j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
f[j+0 *pitch*YDIM*ZDIM]=0.f;
f[j+1 *pitch*YDIM*ZDIM]=0.f;
f[j+2 *pitch*YDIM*ZDIM]=0.f;
f[j+3 *pitch*YDIM*ZDIM]=0.f;
f[j+4 *pitch*YDIM*ZDIM]=0.f;
f[j+5 *pitch*YDIM*ZDIM]=0.f;
f[j+6 *pitch*YDIM*ZDIM]=0.f;
f[j+7 *pitch*YDIM*ZDIM]=0.f;
f[j+8 *pitch*YDIM*ZDIM]=0.f;
f[j+9 *pitch*YDIM*ZDIM]=0.f;
f[j+10*pitch*YDIM*ZDIM]=0.f;
f[j+11*pitch*YDIM*ZDIM]=0.f;
f[j+12*pitch*YDIM*ZDIM]=0.f;
f[j+13*pitch*YDIM*ZDIM]=0.f;
f[j+14*pitch*YDIM*ZDIM]=0.f;
f[j+15*pitch*YDIM*ZDIM]=0.f;
f[j+16*pitch*YDIM*ZDIM]=0.f;
f[j+17*pitch*YDIM*ZDIM]=0.f;
f[j+18*pitch*YDIM*ZDIM]=0.f;
}
}
}
__global__ void initialize_buffer(float *g, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*YDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
g[j+0 *pitch*YDIM]=f0 ;
g[j+1 *pitch*YDIM]=f1 ;
g[j+2 *pitch*YDIM]=f2 ;
g[j+3 *pitch*YDIM]=f3 ;
g[j+4 *pitch*YDIM]=f4 ;
g[j+5 *pitch*YDIM]=f5 ;
g[j+6 *pitch*YDIM]=f6 ;
g[j+7 *pitch*YDIM]=f7 ;
g[j+8 *pitch*YDIM]=f8 ;
g[j+9 *pitch*YDIM]=f9 ;
g[j+10*pitch*YDIM]=f10;
g[j+11*pitch*YDIM]=f11;
g[j+12*pitch*YDIM]=f12;
g[j+13*pitch*YDIM]=f13;
g[j+14*pitch*YDIM]=f14;
g[j+15*pitch*YDIM]=f15;
g[j+16*pitch*YDIM]=f16;
g[j+17*pitch*YDIM]=f17;
g[j+18*pitch*YDIM]=f18;
}
}
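/*
Host driver layout (two GPUs, z-decomposition):
 - each GPU owns zInner = ZDIM/GPU_N-2 interior slices stored in fA/fB, plus single-slice bottom
   (gA/gB) and top (hA/hB) buffers; gA_temp/hA_temp receive halo planes from the peer GPU.
 - every array has an A and a B copy that are ping-ponged, so each pass of the time loop advances
   two time steps (A->B, then B->A).
 - device arrays are indexed with pitch_elements, the padded row length in floats.
*/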
int main(int argc, char *argv[])
{
int GPU_N;
hipGetDeviceCount(&GPU_N);
	GPU_N = 2; //this driver assumes exactly two GPUs, so the detected device count is overridden
	cout<<"number of GPUs used: "<<GPU_N<<endl;
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = pitch*sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, n, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
cout<<"omega : "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
int zInner = ZDIM/GPU_N-2;
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ;
n = nBlocks*B;
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
hipSetDevice(0);
hipStream_t stream[3];
hipStreamCreate(&stream[0]);
hipStreamCreate(&stream[1]);
hipStreamCreate(&stream[2]);
hipDeviceEnablePeerAccess(1,0);
//buffers g,h
float *gA_h;
float *gA_d ,*gB_d;
float *gA_temp_d;
float *hA_h;
float *hA_d ,*hB_d;
float *hA_temp_d;
//image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d;
float *FX_h,*FY_h,*FZ_h,*FX_d,*FY_d,*FZ_d;
gA_h = (float *)malloc(XDIM*YDIM*19*sizeof(float));
hA_h = (float *)malloc(XDIM*YDIM*19*sizeof(float));
fA_h = (float *)malloc(XDIM*YDIM*zInner*sizeof(float)*19);//(float *)malloc(memsize *19);
FX_h = (float *)malloc(TMAX*sizeof(float));
FY_h = (float *)malloc(TMAX*sizeof(float));
FZ_h = (float *)malloc(TMAX*sizeof(float));
// hipMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// hipMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// hipMallocPitch((void **) &gA_d, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &gB_d, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &hA_d, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &hB_d, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &gA_temp_d, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &hA_temp_d, &pitch, XDIM*sizeof(float), YDIM*19);
hipMalloc((void **) &fA_d , pitch*YDIM*19*sizeof(float)*zInner);
hipMalloc((void **) &fB_d , pitch*YDIM*19*sizeof(float)*zInner);
hipMalloc((void **) &gA_d , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &gB_d , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &hA_d , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &hB_d , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &gA_temp_d, pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &hA_temp_d, pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &FX_d, TMAX*sizeof(float));
hipMalloc((void **) &FY_d, TMAX*sizeof(float));
hipMalloc((void **) &FZ_d, TMAX*sizeof(float));
size_t pitch_elements = pitch/sizeof(float);
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
//initialize host f
for (i = 0; i < n*19; i++)
{
fA_h[i] = 0;
}
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++)
{
gA_h[i] = 0;
hA_h[i] = 0;
}
//initialize host FX
for (i = 0; i < TMAX; i++){
FX_h[i] = 0.f;
FY_h[i] = 0.f;
FZ_h[i] = 0.f;
}
//memcpy FX
hipMemcpy(FX_d, FX_h, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(FY_d, FY_h, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(FZ_d, FZ_h, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy2D(fA_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(fB_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(gA_d,pitch ,gA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(gB_d,pitch ,gA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(hA_d,pitch ,hA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(hB_d,pitch ,hA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipSetDevice(1);
hipDeviceEnablePeerAccess(0,0);
hipStream_t stream2[3];
hipStreamCreate(&stream2[0]);
hipStreamCreate(&stream2[1]);
hipStreamCreate(&stream2[2]);
float *gA_d2,*gB_d2;
float *gA_h2;
float *gA_temp_d2;
float *hA_d2,*hB_d2;
float *hA_h2;
float *hA_temp_d2;
float *fA_h2,*fA_d2,*fB_d2;
float *FX_h2,*FY_h2,*FZ_h2,*FX_d2,*FY_d2,*FZ_d2;
gA_h2 = (float *)malloc(XDIM*YDIM*19*sizeof(float));
hA_h2 = (float *)malloc(XDIM*YDIM*19*sizeof(float));
fA_h2 = (float *)malloc(XDIM*YDIM*zInner*sizeof(float)*19);//(float *)malloc(memsize*19);
FX_h2 = (float *)malloc(TMAX*sizeof(float));
FY_h2 = (float *)malloc(TMAX*sizeof(float));
FZ_h2 = (float *)malloc(TMAX*sizeof(float));
// hipMallocPitch((void **) &fA_d2, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// hipMallocPitch((void **) &fB_d2, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// hipMallocPitch((void **) &gA_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &gB_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &hA_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &hB_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &gA_temp_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// hipMallocPitch((void **) &hA_temp_d2, &pitch, XDIM*sizeof(float), YDIM*19);
hipMalloc((void **) &fA_d2 , pitch*YDIM*19*sizeof(float)*zInner);
hipMalloc((void **) &fB_d2 , pitch*YDIM*19*sizeof(float)*zInner);
hipMalloc((void **) &gA_d2 , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &gB_d2 , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &hA_d2 , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &hB_d2 , pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &gA_temp_d2, pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &hA_temp_d2, pitch*YDIM*19*sizeof(float));
hipMalloc((void **) &FX_d2, TMAX*sizeof(float));
hipMalloc((void **) &FY_d2, TMAX*sizeof(float));
hipMalloc((void **) &FZ_d2, TMAX*sizeof(float));
for (i = 0; i < n*19; i++)
{
fA_h2[i] = 0;
}
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++)
{
gA_h2[i] = 0;
hA_h2[i] = 0;
}
for (i = 0; i < TMAX; i++){
FX_h2[i] = 0.f;
FY_h2[i] = 0.f;
FZ_h2[i] = 0.f;
}
hipMemcpy(FX_d2, FX_h2, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(FY_d2, FY_h2, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(FZ_d2, FZ_h2, TMAX*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy2D(fA_d2,pitch ,fA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(fB_d2,pitch ,fA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(gA_d2,pitch ,gA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(gB_d2,pitch ,gA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(hA_d2,pitch ,hA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipMemcpy2D(hB_d2,pitch ,hA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,hipMemcpyHostToDevice);
hipSetDevice(0);
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements);
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, gA_d,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(h_grid), dim3(threads), 0, 0, hA_d,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, gB_d,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(h_grid), dim3(threads), 0, 0, hB_d,pitch_elements);
hipSetDevice(1);
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d2,pitch_elements);
hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fB_d2,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, gA_d2,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(h_grid), dim3(threads), 0, 0, hA_d2,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, gB_d2,pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(h_grid), dim3(threads), 0, 0, hB_d2,pitch_elements);
hipSetDevice(0);
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t+=2){
//memcpy from dev0 to dev1
//hipMemcpyPeerAsync(*dst,dstdev#,*src,srcdev#,size_t(bytes),cudastream#);
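		//only the five distributions with a z component must cross each inter-GPU interface:
		//planes 14-18 (-z movers) go to the receiving GPU's top temp buffer, planes 9-13 (+z movers)
		//to its bottom temp buffer, i.e. 5*pitch_elements*YDIM floats per copy.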
hipSetDevice(0);
//send g from dev1 to h of dev0
hipMemcpyPeerAsync(&hA_temp_d[pitch_elements*YDIM*14],0,&gA_d2[pitch_elements*YDIM*14],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send g from dev0 to h of dev1
hipMemcpyPeerAsync(&hA_temp_d2[pitch_elements*YDIM*14],1,&gA_d[pitch_elements*YDIM*14],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
//send h from dev1 to g of dev0
hipMemcpyPeerAsync(&gA_temp_d[pitch_elements*YDIM*9],0,&hA_d2[pitch_elements*YDIM*9],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send h from dev0 to g of dev1
hipMemcpyPeerAsync(&gA_temp_d2[pitch_elements*YDIM*9],1,&hA_d[pitch_elements*YDIM*9],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
hipSetDevice(0);
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream[1], fA_d,fB_d,gA_d,hA_d,omega,pitch_elements,0);
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream[0], hA_d,hB_d,fA_d,hA_temp_d,omega,pitch_elements,0);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream[0], gA_d,gB_d,fA_d,gA_temp_d,omega,pitch_elements,0);
hipSetDevice(1);
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream2[1], fA_d2,fB_d2,gA_d2,hA_d2,omega,pitch_elements,1);
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream2[0], hA_d2,hB_d2,fA_d2,hA_temp_d2,omega,pitch_elements,1);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream2[0], gA_d2,gB_d2,fA_d2,gA_temp_d2,omega,pitch_elements,1);
hipDeviceSynchronize();
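		//note: hipDeviceSynchronize() waits only on the currently selected device (device 1 here);
		//the peer copies below also read buffers just written by device 0's kernels in stream[0],
		//so an additional synchronization on device 0 may be needed if cross-device ordering issues appear.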
hipSetDevice(0);
//send g from dev1 to h of dev0
hipMemcpyPeerAsync(&hA_temp_d[pitch_elements*YDIM*14],0,&gB_d2[pitch_elements*YDIM*14],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send g from dev0 to h of dev1
hipMemcpyPeerAsync(&hA_temp_d2[pitch_elements*YDIM*14],1,&gB_d[pitch_elements*YDIM*14],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
//send h from dev1 to g of dev0
hipMemcpyPeerAsync(&gA_temp_d[pitch_elements*YDIM*9],0,&hB_d2[pitch_elements*YDIM*9],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send h from dev0 to g of dev1
hipMemcpyPeerAsync(&gA_temp_d2[pitch_elements*YDIM*9],1,&hB_d[pitch_elements*YDIM*9],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
hipSetDevice(0);
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream[1], fB_d,fA_d,gB_d,hB_d,omega,pitch_elements,0);
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream[0], hB_d,hA_d,fB_d,hA_temp_d,omega,pitch_elements,0);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream[0], gB_d,gA_d,fB_d,gA_temp_d,omega,pitch_elements,0);
hipSetDevice(1);
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream2[1], fB_d2,fA_d2,gB_d2,hB_d2,omega,pitch_elements,1);
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream2[0], hB_d2,hA_d2,fB_d2,hA_temp_d2,omega,pitch_elements,1);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream2[0], gB_d2,gA_d2,fB_d2,gA_temp_d2,omega,pitch_elements,1);
hipDeviceSynchronize();
// if(METHOD == "SINGLE"){
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t,uAv_d,vAv_d,ufluc_d,vfluc_d,0);
// else
// mrt_d_single<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,0);
//
//
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1,uAv_d,vAv_d,ufluc_d,vfluc_d,0);
// else
// mrt_d_single<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,0);
// }
//
// hipSetDevice(1);
//
// if(METHOD == "SINGLE"){
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fA_d2,fB_d2,omega,pitch_elements,FX_d2,FY_d2,FZ_d2,t,uAv_d2,vAv_d2,ufluc_d2,vfluc_d2,1);
// else
// mrt_d_single<<<grid, threads>>>(fA_d2,fB_d2,omega,pitch_elements,1);
//
//
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fB_d2,fA_d2,omega,pitch_elements,FX_d2,FY_d2,FZ_d2,t+1,uAv_d2,vAv_d2,ufluc_d2,vfluc_d2,1);
// else
// mrt_d_single<<<grid, threads>>>(fB_d2,fA_d2,omega,pitch_elements,1);
// }
// if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
hipDeviceSynchronize();
hipSetDevice(0);
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)";
cout<<endl;
cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl;
hipMemcpy2D(fA_h,XDIM*sizeof(float),fB_d,pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(gA_h,XDIM*sizeof(float),gB_d,pitch,XDIM*sizeof(float),YDIM*19,hipMemcpyDeviceToHost);
hipMemcpy2D(hA_h,XDIM*sizeof(float),hB_d,pitch,XDIM*sizeof(float),YDIM*19,hipMemcpyDeviceToHost);
hipMemcpy(FX_h, FX_d, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(FY_h, FY_d, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(FZ_h, FZ_d, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(1);
hipMemcpy2D(fA_h2,XDIM*sizeof(float),fB_d2,pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D(gA_h2,XDIM*sizeof(float),gB_d2,pitch,XDIM*sizeof(float),YDIM*19,hipMemcpyDeviceToHost);
hipMemcpy2D(hA_h2,XDIM*sizeof(float),hB_d2,pitch,XDIM*sizeof(float),YDIM*19,hipMemcpyDeviceToHost);
hipMemcpy(FX_h2, FX_d2, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(FY_h2, FY_d2, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(FZ_h2, FZ_d2, TMAX*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(0);
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;//, usqr;
//int j;
int check = 0;
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = gA_h[i+XDIM*YDIM*0 ];
f1 = gA_h[i+XDIM*YDIM*1 ];
f2 = gA_h[i+XDIM*YDIM*2 ];
f3 = gA_h[i+XDIM*YDIM*3 ];
f4 = gA_h[i+XDIM*YDIM*4 ];
f5 = gA_h[i+XDIM*YDIM*5 ];
f6 = gA_h[i+XDIM*YDIM*6 ];
f7 = gA_h[i+XDIM*YDIM*7 ];
f8 = gA_h[i+XDIM*YDIM*8 ];
f9 = gA_h[i+XDIM*YDIM*9 ];
f10= gA_h[i+XDIM*YDIM*10];
f11= gA_h[i+XDIM*YDIM*11];
f12= gA_h[i+XDIM*YDIM*12];
f13= gA_h[i+XDIM*YDIM*13];
f14= gA_h[i+XDIM*YDIM*14];
f15= gA_h[i+XDIM*YDIM*15];
f16= gA_h[i+XDIM*YDIM*16];
f17= gA_h[i+XDIM*YDIM*17];
f18= gA_h[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<0<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
for(dep = 0; dep<zInner; dep++){
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = dep*XDIM*YDIM+row*XDIM+col;
f0 = fA_h[i+XDIM*YDIM*zInner*0 ];
f1 = fA_h[i+XDIM*YDIM*zInner*1 ];
f2 = fA_h[i+XDIM*YDIM*zInner*2 ];
f3 = fA_h[i+XDIM*YDIM*zInner*3 ];
f4 = fA_h[i+XDIM*YDIM*zInner*4 ];
f5 = fA_h[i+XDIM*YDIM*zInner*5 ];
f6 = fA_h[i+XDIM*YDIM*zInner*6 ];
f7 = fA_h[i+XDIM*YDIM*zInner*7 ];
f8 = fA_h[i+XDIM*YDIM*zInner*8 ];
f9 = fA_h[i+XDIM*YDIM*zInner*9 ];
f10= fA_h[i+XDIM*YDIM*zInner*10];
f11= fA_h[i+XDIM*YDIM*zInner*11];
f12= fA_h[i+XDIM*YDIM*zInner*12];
f13= fA_h[i+XDIM*YDIM*zInner*13];
f14= fA_h[i+XDIM*YDIM*zInner*14];
f15= fA_h[i+XDIM*YDIM*zInner*15];
f16= fA_h[i+XDIM*YDIM*zInner*16];
f17= fA_h[i+XDIM*YDIM*zInner*17];
f18= fA_h[i+XDIM*YDIM*zInner*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<dep+1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
if(rho>-0.001f && rho<2.f){
}
else{
check = 1;
}
}
}
}
//top of GPU1
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = hA_h[i+XDIM*YDIM*0 ];
f1 = hA_h[i+XDIM*YDIM*1 ];
f2 = hA_h[i+XDIM*YDIM*2 ];
f3 = hA_h[i+XDIM*YDIM*3 ];
f4 = hA_h[i+XDIM*YDIM*4 ];
f5 = hA_h[i+XDIM*YDIM*5 ];
f6 = hA_h[i+XDIM*YDIM*6 ];
f7 = hA_h[i+XDIM*YDIM*7 ];
f8 = hA_h[i+XDIM*YDIM*8 ];
f9 = hA_h[i+XDIM*YDIM*9 ];
f10= hA_h[i+XDIM*YDIM*10];
f11= hA_h[i+XDIM*YDIM*11];
f12= hA_h[i+XDIM*YDIM*12];
f13= hA_h[i+XDIM*YDIM*13];
f14= hA_h[i+XDIM*YDIM*14];
f15= hA_h[i+XDIM*YDIM*15];
f16= hA_h[i+XDIM*YDIM*16];
f17= hA_h[i+XDIM*YDIM*17];
f18= hA_h[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM/2-1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
//bottom of GPU2
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = gA_h2[i+XDIM*YDIM*0 ];
f1 = gA_h2[i+XDIM*YDIM*1 ];
f2 = gA_h2[i+XDIM*YDIM*2 ];
f3 = gA_h2[i+XDIM*YDIM*3 ];
f4 = gA_h2[i+XDIM*YDIM*4 ];
f5 = gA_h2[i+XDIM*YDIM*5 ];
f6 = gA_h2[i+XDIM*YDIM*6 ];
f7 = gA_h2[i+XDIM*YDIM*7 ];
f8 = gA_h2[i+XDIM*YDIM*8 ];
f9 = gA_h2[i+XDIM*YDIM*9 ];
f10= gA_h2[i+XDIM*YDIM*10];
f11= gA_h2[i+XDIM*YDIM*11];
f12= gA_h2[i+XDIM*YDIM*12];
f13= gA_h2[i+XDIM*YDIM*13];
f14= gA_h2[i+XDIM*YDIM*14];
f15= gA_h2[i+XDIM*YDIM*15];
f16= gA_h2[i+XDIM*YDIM*16];
f17= gA_h2[i+XDIM*YDIM*17];
f18= gA_h2[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM/2<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
for(dep = 0; dep<zInner; dep++){
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = dep*XDIM*YDIM+row*XDIM+col;
f0 = fA_h2[i+XDIM*YDIM*zInner*0 ];
f1 = fA_h2[i+XDIM*YDIM*zInner*1 ];
f2 = fA_h2[i+XDIM*YDIM*zInner*2 ];
f3 = fA_h2[i+XDIM*YDIM*zInner*3 ];
f4 = fA_h2[i+XDIM*YDIM*zInner*4 ];
f5 = fA_h2[i+XDIM*YDIM*zInner*5 ];
f6 = fA_h2[i+XDIM*YDIM*zInner*6 ];
f7 = fA_h2[i+XDIM*YDIM*zInner*7 ];
f8 = fA_h2[i+XDIM*YDIM*zInner*8 ];
f9 = fA_h2[i+XDIM*YDIM*zInner*9 ];
f10= fA_h2[i+XDIM*YDIM*zInner*10];
f11= fA_h2[i+XDIM*YDIM*zInner*11];
f12= fA_h2[i+XDIM*YDIM*zInner*12];
f13= fA_h2[i+XDIM*YDIM*zInner*13];
f14= fA_h2[i+XDIM*YDIM*zInner*14];
f15= fA_h2[i+XDIM*YDIM*zInner*15];
f16= fA_h2[i+XDIM*YDIM*zInner*16];
f17= fA_h2[i+XDIM*YDIM*zInner*17];
f18= fA_h2[i+XDIM*YDIM*zInner*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM/2+dep+1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h2[i]<<","<<vAv_h2[i]<<", "<<ufluc_h2[i]<<","<<vfluc_h2[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
if(rho>0.f && rho<2.f){
}
else{
check = 1;
}
}
}
}
//top of GPU2
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = hA_h2[i+XDIM*YDIM*0 ];
f1 = hA_h2[i+XDIM*YDIM*1 ];
f2 = hA_h2[i+XDIM*YDIM*2 ];
f3 = hA_h2[i+XDIM*YDIM*3 ];
f4 = hA_h2[i+XDIM*YDIM*4 ];
f5 = hA_h2[i+XDIM*YDIM*5 ];
f6 = hA_h2[i+XDIM*YDIM*6 ];
f7 = hA_h2[i+XDIM*YDIM*7 ];
f8 = hA_h2[i+XDIM*YDIM*8 ];
f9 = hA_h2[i+XDIM*YDIM*9 ];
f10= hA_h2[i+XDIM*YDIM*10];
f11= hA_h2[i+XDIM*YDIM*11];
f12= hA_h2[i+XDIM*YDIM*12];
f13= hA_h2[i+XDIM*YDIM*13];
f14= hA_h2[i+XDIM*YDIM*14];
f15= hA_h2[i+XDIM*YDIM*15];
f16= hA_h2[i+XDIM*YDIM*16];
f17= hA_h2[i+XDIM*YDIM*17];
f18= hA_h2[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM-1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
	if(check == 1) cout<<"error: density out of expected range in the output field!"<<endl;
output.close();
//for(int t = STARTF-1; t<TMAX; t++){
for(int t = 0; t<TMAX; t++){
output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", "
<<FY_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", "
<<FZ_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<endl;
// output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", "
// <<FY_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", "
// <<FZ_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<endl;
}
output2.close();
//hipFree(image_d);
hipFree(fA_d);
hipFree(fB_d);
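	//optional housekeeping: release the remaining buffers and streams explicitly instead of relying
	//on process exit; this uses only hipFree/hipStreamDestroy/free on handles created above.
	hipFree(gA_d); hipFree(gB_d); hipFree(hA_d); hipFree(hB_d);
	hipFree(gA_temp_d); hipFree(hA_temp_d);
	hipFree(FX_d); hipFree(FY_d); hipFree(FZ_d);
	hipStreamDestroy(stream[0]); hipStreamDestroy(stream[1]); hipStreamDestroy(stream[2]);
	hipSetDevice(1);
	hipFree(fA_d2); hipFree(fB_d2);
	hipFree(gA_d2); hipFree(gB_d2); hipFree(hA_d2); hipFree(hB_d2);
	hipFree(gA_temp_d2); hipFree(hA_temp_d2);
	hipFree(FX_d2); hipFree(FY_d2); hipFree(FZ_d2);
	hipStreamDestroy(stream2[0]); hipStreamDestroy(stream2[1]); hipStreamDestroy(stream2[2]);
	hipSetDevice(0);
	free(fA_h); free(gA_h); free(hA_h); free(FX_h); free(FY_h); free(FZ_h);
	free(fA_h2); free(gA_h2); free(hA_h2); free(FX_h2); free(FY_h2); free(FZ_h2);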
return(0);
}
| 597a30fedaeceebaf25a86915bf74af93797186b.cu | #include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "Test31"
#define BLOCKSIZEX 64
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 64
#define YDIM 64
#define ZDIM 10
#define TMAX 5000
#define STARTF 0
#define OBSTR1 4.f
#define OBSTX1 31.5f
#define OBSTY1 31.5f
#define OBSTZ1 15.5f
#define OBSTR2 4.f
#define OBSTX2 63.5f
#define OBSTY2 31.5f
#define OBSTZ2 31.5f
#define RE 100.f//2000.f//100.f;
#define UMAX 0.08f
#define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE
#define SmagLES "NO" //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
#define ZPERIODIC "NO"
#define CS 0.04f
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
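//floating-point overload of ImageFcn (presumably for sub-cell geometry queries); only the square
//cylinder test around (OBSTX1, OBSTY1) is active here, the spherical/cylindrical variants are
//commented out.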
inline __device__ int ImageFcn(float x, float y, float z){
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
return 10;
else
return 0;
}
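//integer (node-wise) overload of ImageFcn. Active flags: 10 = solid square cylinder (bounce-back
//with force, per the legend above), 200 = south inlet, 100 = north outlet, 26/25 = x-symmetry at
//x = 0 and x = XDIM-1, 0 = fluid.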
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
// if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// else if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
// else if(x == 0)
// value = 26;
// else if(x == XDIM-1)
// value = 25;
// else if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
//return value;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
else if(x == 0)
value = 26;
else if(x == XDIM-1)
value = 25;
// else if(x == 0)
// return 53;
// else if(x == XDIM-1)
// return 54;
return value;
}
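// A minimal sketch of the kind of setup kernel that typically consumes
// ImageFcn to tag every lattice node with its geometry/boundary flag
// (assumptions: the kernel name "initimage_sketch", the int storage type and
// the x-major layout are illustrative only; the real initialization in this
// file may be organized differently):
__global__ void initimage_sketch(int* image)
{
	int x = threadIdx.x + blockIdx.x*blockDim.x;
	int y = threadIdx.y + blockIdx.y*blockDim.y;
	int z = threadIdx.z + blockIdx.z*blockDim.z;
	if(x < XDIM && y < YDIM && z < ZDIM)
		image[x + y*XDIM + z*XDIM*YDIM] = ImageFcn(x,y,z);
}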
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
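// PoisProf returns a parabolic (plane Poiseuille) profile that is 0 at
// x = 0.5 and x = YDIM-1.5 and peaks at 1 on the channel centerline; the
// commented-out call sites below multiply it by 1.5f, presumably so that the
// mean of the profile equals UMAX (the mean is 2/3 of the peak for a parabola).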
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
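// The regularized inlet above follows the usual three-step recipe: (1) build
// the equilibrium set from the prescribed velocity and the density recovered
// from the known populations, (2) reconstruct the unknown incoming
// distributions by bouncing back their non-equilibrium parts, and (3) replace
// every f_i with feq_i + 4.5*w_i*(c_i c_i - I/3) : PI, which is exactly what
// the 1.5/0.25/0.125 prefactors implement (4.5*w_i for w = 1/3, 1/18, 1/36).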
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
	f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
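// The *symmetry_* helpers above implement free-slip (mirror) planes: each
// population whose lattice velocity points out of the symmetry plane is
// replaced by its mirror image, i.e. the population with the wall-normal
// velocity component reversed.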
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
//DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
	else if(im == 54)//NeumannEast
{
//NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
}
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
	else if(im == 54)//NeumannEast
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
	else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
	else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
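// Note the split between the two dispatchers above: boundaries() uses the
// regularized west/east conditions and only the y/z symmetry planes, while
// boundaries_force() uses the DirichletWest_Reg/NeumannEast variants and
// additionally handles the x-symmetry planes (flags 25/26).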
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
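// North_Extrap/South_Extrap above impose a macroscopic value by projecting the
// current distributions onto the D3Q19 MRT moment basis and rebuilding all
// nineteen populations: the north variant fixes the density to 1 while keeping
// the measured velocity and higher moments, and the south variant fixes the
// y-velocity to the prescribed v (with u = w = 0) while keeping the measured
// density and higher moments.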
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
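// Usage sketch (assumption: the timer brackets the main time-stepping loop;
// the actual call sites appear elsewhere in this file):
//   struct timeval tdr0, tdr1; double elapsed;
//   gettimeofday(&tdr0, NULL);
//   /* ... launch kernels for TMAX steps ... */
//   gettimeofday(&tdr1, NULL);
//   timeval_subtract(&elapsed, &tdr1, &tdr0);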
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
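// bgk_collide relaxes every distribution toward its D3Q19 equilibrium at the
// single rate omega: f_i <- f_i - omega*(f_i - feq_i), which is algebraically
// identical to the commented-out (1-omega)*f_i + omega*feq_i form above.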
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
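	// Note: SmagLES is a string-literal macro, so this == comparison checks
	// literal addresses; it folds to a compile-time constant and only behaves
	// like a string compare because the compiler merges identical literals.
	// A numeric flag would be more robust.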
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
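	//Effective relaxation time from the non-equilibrium stress magnitude Q:
	//tau = 0.5f*tau0 + 0.5f*sqrt(tau0*tau0 + 18.f*sqrt(2.f)*CS*Q), i.e. the closed-form
	//Smagorinsky correction in lattice units, with CS the model constant applied to Q.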
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
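	//Relax in moment space and map back to populations: the shear-related moments
	//(m9, m11, m13, m14, m15) are relaxed with omega, while the remaining moments use
	//fixed relaxation factors folded into the hard-coded numeric coefficients.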
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
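//Variant of mrt_collide that takes the Smagorinsky constant as a runtime argument.
//Note: the Cs parameter is only referenced in the commented-out code; the active
//SmagLES branch below uses the compile-time constant CS.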
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
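//Rescale distributions between grid levels using the BGK (second-order) equilibria:
//each f_i is blended as f_i = feq_i + SF*(f_i - feq_i), so SF rescales only the
//non-equilibrium part while rho, u, v, w are preserved.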
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
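//Grid-refinement rescaling (the _cf suffix suggests coarse-to-fine) using the MRT
//equilibria with their higher-order corrections. The scale factor SF is recomputed
//below from the coarse/fine relaxation rates omega and omega2, so the SF passed in
//is overwritten before the blending step.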
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
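//Fine-to-coarse rescaling for the LES case: distributions are blended with the MRT
//equilibria using SF computed from the two relaxation rates passed in (omega on the
//coarse mesh, omega2 on the fine mesh); the commented blocks record variants tried.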
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
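//Linear index of distribution component f_num at (x,y,z) in the pitched array.
//Each component occupies a block of pitch*YDIM*(ZDIM/2-2) elements, which appears to
//be the interior slab depth handled per GPU (the two edge planes live in the g/h buffers).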
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch)
{
return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(ZDIM/2-2);
}
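//Linear index into a single-plane edge/halo buffer: one pitch*YDIM block per component.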
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
return (x+y*pitch)+f_num*pitch*YDIM;
}
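//Index helpers for pull-streaming: dmin/dmax clamp a neighbor index to the valid range,
//while dmin_p/dmax_p wrap it periodically.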
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
//__global__ void mrt_d_single_force(float* fin, float* fout,
// float omega, size_t pitch, float *FX, float *FY, float *FZ,
// int t,float *uAv, float *vAv, float *ufluc, float *vfluc, int GPU)
//{
// int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
// int y = threadIdx.y+blockIdx.y*blockDim.y;//;
// int z = threadIdx.z+blockIdx.z*blockDim.z+ZDIM/2*GPU;
// int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
// __shared__ int check[1];
// check[0] = 0;
// syncthreads();
// float u_Av, v_Av, u_fluc, v_fluc;
//
//
//// if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 &&
//// y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 &&
//// z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
//// (x>XDIM-1)))
//// {
//// }
//// else{
// int im = ImageFcn(x,y,z);
//
// f0 = fin[j];
// f1 = fin[f_mem(1 ,x-1,y ,z ,pitch)];
// f3 = fin[f_mem(3 ,x+1,y ,z ,pitch)];
// f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)];
// f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch)];
// f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch)];
// f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)];
// f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch)];
// f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch)];
// if(z != 0){
// f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)];
// f10= fin[f_mem(10,x-1,y ,z-1,pitch)];
// f11= fin[f_mem(11,x ,y-1,z-1,pitch)];
// f12= fin[f_mem(12,x+1,y ,z-1,pitch)];
// f13= fin[f_mem(13,x ,y+1,z-1,pitch)];
// }
// else{
// f9 = fin[f_mem(9 ,x ,y ,ZDIM-1,pitch)];
// f10= fin[f_mem(10,dmax_p(x-1,XDIM),y ,ZDIM-1,pitch)];
// f11= fin[f_mem(11,x ,dmax_p(y-1,YDIM),ZDIM-1,pitch)];
// f12= fin[f_mem(12,dmin_p(x+1,XDIM),y ,ZDIM-1,pitch)];
// f13= fin[f_mem(13,x ,dmin_p(y+1,YDIM),ZDIM-1,pitch)];
// }
// if(z != ZDIM-1){
// f14= fin[f_mem(14,x ,y ,z+1,pitch)];
// f15= fin[f_mem(15,x-1,y ,z+1,pitch)];
// f16= fin[f_mem(16,x ,y-1,z+1,pitch)];
// f17= fin[f_mem(17,x+1,y ,z+1,pitch)];
// f18= fin[f_mem(18,x ,y+1,z+1,pitch)];
// }
// else{
// f14= fin[f_mem(14,x ,y ,0 ,pitch)];
// f15= fin[f_mem(15,dmax_p(x-1,XDIM),y ,0 ,pitch)];
// f16= fin[f_mem(16,x ,dmax_p(y-1,YDIM),0 ,pitch)];
// f17= fin[f_mem(17,dmin_p(x+1,XDIM),y ,0 ,pitch)];
// f18= fin[f_mem(18,x ,dmin_p(y+1,YDIM),0 ,pitch)];
// }
//
// if(im == 1 || im == 10){//BB
// if(im == 10){
// check[0] = 1;
// sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
// sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
// sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// }
// fout[j+pitch*YDIM*ZDIM*1 ] = f3 ;
// fout[j+pitch*YDIM*ZDIM*2 ] = f4 ;
// fout[j+pitch*YDIM*ZDIM*3 ] = f1 ;
// fout[j+pitch*YDIM*ZDIM*4 ] = f2 ;
// fout[j+pitch*YDIM*ZDIM*5 ] = f7 ;
// fout[j+pitch*YDIM*ZDIM*6 ] = f8 ;
// fout[j+pitch*YDIM*ZDIM*7 ] = f5 ;
// fout[j+pitch*YDIM*ZDIM*8 ] = f6 ;
// fout[j+pitch*YDIM*ZDIM*9 ] = f14;
// fout[j+pitch*YDIM*ZDIM*10] = f17;
// fout[j+pitch*YDIM*ZDIM*11] = f18;
// fout[j+pitch*YDIM*ZDIM*12] = f15;
// fout[j+pitch*YDIM*ZDIM*13] = f16;
// fout[j+pitch*YDIM*ZDIM*14] = f9 ;
// fout[j+pitch*YDIM*ZDIM*15] = f12;
// fout[j+pitch*YDIM*ZDIM*16] = f13;
// fout[j+pitch*YDIM*ZDIM*17] = f10;
// fout[j+pitch*YDIM*ZDIM*18] = f11;
// }
// else{
// sumX[threadIdx.x]=0.f;
// sumY[threadIdx.x]=0.f;
// sumZ[threadIdx.x]=0.f;
// boundaries_force(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
//
// if(im == 100)//north outlet
// {
// f0 = fin[f_mem(0 ,x,y-1,z,pitch)];
// f1 = fin[f_mem(1 ,x,y-1,z,pitch)];
// f3 = fin[f_mem(3 ,x,y-1,z,pitch)];
// f2 = fin[f_mem(2 ,x,y-1,z,pitch)];
// f5 = fin[f_mem(5 ,x,y-1,z,pitch)];
// f6 = fin[f_mem(6 ,x,y-1,z,pitch)];
// f4 = fin[f_mem(4 ,x,y-1,z,pitch)];
// f7 = fin[f_mem(7 ,x,y-1,z,pitch)];
// f8 = fin[f_mem(8 ,x,y-1,z,pitch)];
// f9 = fin[f_mem(9 ,x,y-1,z,pitch)];
// f10= fin[f_mem(10,x,y-1,z,pitch)];
// f11= fin[f_mem(11,x,y-1,z,pitch)];
// f12= fin[f_mem(12,x,y-1,z,pitch)];
// f13= fin[f_mem(13,x,y-1,z,pitch)];
// f14= fin[f_mem(14,x,y-1,z,pitch)];
// f15= fin[f_mem(15,x,y-1,z,pitch)];
// f16= fin[f_mem(16,x,y-1,z,pitch)];
// f17= fin[f_mem(17,x,y-1,z,pitch)];
// f18= fin[f_mem(18,x,y-1,z,pitch)];
//
// float rho,u,v,w;
// rho = 1.0f;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
// m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
// m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
// m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
// m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
// m13 = f5+ - f6+ f7+ - f8 ;
// m14 = f11 + - f13 + - f16 + f18;
// m15 = f10 + - f12 + - f15 + f17 ;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
//f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
//f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
//f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//
// }
// if(im == 200)//south inlet
// {
// f0 = fin[f_mem(0 ,x,y+1,z,pitch)];
// f1 = fin[f_mem(1 ,x,y+1,z,pitch)];
// f3 = fin[f_mem(3 ,x,y+1,z,pitch)];
// f2 = fin[f_mem(2 ,x,y+1,z,pitch)];
// f5 = fin[f_mem(5 ,x,y+1,z,pitch)];
// f6 = fin[f_mem(6 ,x,y+1,z,pitch)];
// f4 = fin[f_mem(4 ,x,y+1,z,pitch)];
// f7 = fin[f_mem(7 ,x,y+1,z,pitch)];
// f8 = fin[f_mem(8 ,x,y+1,z,pitch)];
// f9 = fin[f_mem(9 ,x,y+1,z,pitch)];
// f10= fin[f_mem(10,x,y+1,z,pitch)];
// f11= fin[f_mem(11,x,y+1,z,pitch)];
// f12= fin[f_mem(12,x,y+1,z,pitch)];
// f13= fin[f_mem(13,x,y+1,z,pitch)];
// f14= fin[f_mem(14,x,y+1,z,pitch)];
// f15= fin[f_mem(15,x,y+1,z,pitch)];
// f16= fin[f_mem(16,x,y+1,z,pitch)];
// f17= fin[f_mem(17,x,y+1,z,pitch)];
// f18= fin[f_mem(18,x,y+1,z,pitch)];
//
// float rho,u,v,w;
// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = UMAX;//f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
// m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
// m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
// m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
// m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
// m13 = f5+ - f6+ f7+ - f8 ;
// m14 = f11 + - f13 + - f16 + f18;
// m15 = f10 + - f12 + - f15 + f17 ;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
//f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
//f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
//f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
//f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
//f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
//f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
//f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
//f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
//f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
//f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
//
// }
//
//
// //float Cs = 0.01f;
// //if(XDIM-x < 64.f)
// ////Cs = 0.01f+(x-64.f)/64.f*(x-64.f)/64.f*0.1f;
// //Cs = 0.01f*pow(2.f,((x-448.f)/16.f));
//
// if(MODEL == "MRT")
// mrt_collide_LES(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega,CS);
// else if(MODEL == "BGK")
// bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//
// if(VELAV == "YES"){
// if(t>=START_VELAV && t<START_VELFLUC){
// u_Av = uAv[j];
// v_Av = vAv[j];
// vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
// uAv[j] = u_Av;
// vAv[j] = v_Av;
// }
// else if(t>=START_VELFLUC){
// u_Av = uAv[j];
// v_Av = vAv[j];
// u_fluc = ufluc[j];
// v_fluc = vfluc[j];
// vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
// ufluc[j] = u_fluc;
// vfluc[j] = v_fluc;
//
// }
// }
//
//
//
// fout[f_mem(0 ,x,y,z,pitch)] = f0 ;
// fout[f_mem(1 ,x,y,z,pitch)] = f1 ;
// fout[f_mem(2 ,x,y,z,pitch)] = f2 ;
// fout[f_mem(3 ,x,y,z,pitch)] = f3 ;
// fout[f_mem(4 ,x,y,z,pitch)] = f4 ;
// fout[f_mem(5 ,x,y,z,pitch)] = f5 ;
// fout[f_mem(6 ,x,y,z,pitch)] = f6 ;
// fout[f_mem(7 ,x,y,z,pitch)] = f7 ;
// fout[f_mem(8 ,x,y,z,pitch)] = f8 ;
// fout[f_mem(9 ,x,y,z,pitch)] = f9 ;
// fout[f_mem(10,x,y,z,pitch)] = f10;
// fout[f_mem(11,x,y,z,pitch)] = f11;
// fout[f_mem(12,x,y,z,pitch)] = f12;
// fout[f_mem(13,x,y,z,pitch)] = f13;
// fout[f_mem(14,x,y,z,pitch)] = f14;
// fout[f_mem(15,x,y,z,pitch)] = f15;
// fout[f_mem(16,x,y,z,pitch)] = f16;
// fout[f_mem(17,x,y,z,pitch)] = f17;
// fout[f_mem(18,x,y,z,pitch)] = f18;
// }
//
// syncthreads();
// if(check[0] == 1 && t>=STARTF && REFINEMENT == "NO"){
// //reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
// sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
// sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
// if(threadIdx.x == 0){
// atomicAdd(&FX[t],sumX[0]);
// atomicAdd(&FY[t],sumY[0]);
// atomicAdd(&FZ[t],sumZ[0]);
// }
// }
//
//
//// }
//}
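//One LBM time step for a full per-GPU block, pull-streaming the 19 distributions
//directly from fA (no halo buffers): solid nodes (im == 1 or 10) get bounce-back,
//all other nodes get boundary conditions plus MRT or BGK collision, and the
//post-collision values are written to fB.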
__global__ void mrt_d_single(float* fA, float* fB,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z+ZDIM/2*GPU;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)];
f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)];
f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)];
f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)];
f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)];
f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)];
f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)];
f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)];
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch)];
f14= fA[f_mem(14,x ,y ,z+1,pitch)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch)];
//f18= fA[f_mem(18,x ,y+1,dmin(z+1,ZDIM),pitch)];
	if(z != ZDIM-1)	//note: f18 is left unset on the top plane here; the periodic read survives only in the commented line above
		f18= fA[f_mem(18,x  ,y+1,z+1,pitch)];
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch)] = f14;
fB[f_mem(10,x,y,z,pitch)] = f17;
fB[f_mem(11,x,y,z,pitch)] = f18;
fB[f_mem(12,x,y,z,pitch)] = f15;
fB[f_mem(13,x,y,z,pitch)] = f16;
fB[f_mem(14,x,y,z,pitch)] = f9 ;
fB[f_mem(15,x,y,z,pitch)] = f12;
fB[f_mem(16,x,y,z,pitch)] = f13;
fB[f_mem(17,x,y,z,pitch)] = f10;
fB[f_mem(18,x,y,z,pitch)] = f11;
}
else{
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
fB[f_mem(10,x,y,z,pitch)] = f10;
fB[f_mem(11,x,y,z,pitch)] = f11;
fB[f_mem(12,x,y,z,pitch)] = f12;
fB[f_mem(13,x,y,z,pitch)] = f13;
fB[f_mem(14,x,y,z,pitch)] = f14;
fB[f_mem(15,x,y,z,pitch)] = f15;
fB[f_mem(16,x,y,z,pitch)] = f16;
fB[f_mem(17,x,y,z,pitch)] = f17;
fB[f_mem(18,x,y,z,pitch)] = f18;
}
// }
}
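//Update the interior planes of this GPU's slab. Most z-neighbors are read from fA,
//but the last interior plane (z == ZDIM/2-2) pulls the downward-moving populations
//(f14..f18) from the h buffer, and the first plane (z == 0) pulls the upward-moving
//populations (f9..f13) from the g buffer.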
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*ZDIM/2+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
if(z==ZDIM/2-1-1){//top nodes need info from h
f0 = fA[j];
f1 = fA[f_mem (1 ,dmax(x-1) ,y ,z ,pitch)];
f3 = fA[f_mem (3 ,dmin(x+1,XDIM),y ,z ,pitch)];
f2 = fA[f_mem (2 ,x ,dmax(y-1) ,z ,pitch)];
f5 = fA[f_mem (5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
f6 = fA[f_mem (6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
f4 = fA[f_mem (4 ,x ,dmin(y+1,YDIM),z ,pitch)];
f7 = fA[f_mem (7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
f8 = fA[f_mem (8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch)];
f10= fA[f_mem (10,dmax(x-1) ,y ,z-1,pitch)];
f11= fA[f_mem (11,x ,dmax(y-1) ,z-1,pitch)];
f12= fA[f_mem (12,dmin(x+1,XDIM),y ,z-1,pitch)];
f13= fA[f_mem (13,x ,dmin(y+1,YDIM),z-1,pitch)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,dmax(x-1) ,y ,pitch)];
f16= h [buff_mem(16,x ,dmax(y-1) ,pitch)];
f17= h [buff_mem(17,dmin(x+1,XDIM),y ,pitch)];
f18= h [buff_mem(18,x ,dmin(y+1,YDIM),pitch)];
}
else if(z==0){//bottom nodes need info from g
f0 = fA[j];
f1 = fA[f_mem (1 ,dmax(x-1) ,y ,z ,pitch)];
f3 = fA[f_mem (3 ,dmin(x+1,XDIM),y ,z ,pitch)];
f2 = fA[f_mem (2 ,x ,dmax(y-1) ,z ,pitch)];
f5 = fA[f_mem (5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch)];
f6 = fA[f_mem (6 ,dmin(x+1,XDIM),dmax(y-1) ,z ,pitch)];
f4 = fA[f_mem (4 ,x ,dmin(y+1,YDIM),z ,pitch)];
f7 = fA[f_mem (7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z ,pitch)];
f8 = fA[f_mem (8 ,dmax(x-1) ,dmin(y+1,YDIM),z ,pitch)];
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,dmax(x-1) ,y ,pitch)];
f11= g [buff_mem(11,x ,dmax(y-1) ,pitch)];
f12= g [buff_mem(12,dmin(x+1,XDIM),y ,pitch)];
f13= g [buff_mem(13,x ,dmin(y+1,YDIM),pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch)];
f15= fA[f_mem (15,dmax(x-1) ,y ,z+1,pitch)];
f16= fA[f_mem (16,x ,dmax(y-1) ,z+1,pitch)];
f17= fA[f_mem (17,dmin(x+1,XDIM),y ,z+1,pitch)];
f18= fA[f_mem (18,x ,dmin(y+1,YDIM),z+1,pitch)];
}
else{//normal nodes
f0 = fA[j];
f1 = fA[f_mem(1 ,dmax(x-1) ,y ,z,pitch)];
f3 = fA[f_mem(3 ,dmin(x+1,XDIM),y ,z,pitch)];
f2 = fA[f_mem(2 ,x ,dmax(y-1) ,z,pitch)];
f5 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z,pitch)];
f6 = fA[f_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,z,pitch)];
f4 = fA[f_mem(4 ,x ,dmin(y+1,YDIM),z,pitch)];
f7 = fA[f_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),z,pitch)];
f8 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),z,pitch)];
f9 = fA[f_mem(9 ,x ,y ,z,pitch)];
f10= fA[f_mem(10,dmax(x-1) ,y ,z,pitch)];
f11= fA[f_mem(11,x ,dmax(y-1) ,z,pitch)];
f12= fA[f_mem(12,dmin(x+1,XDIM),y ,z,pitch)];
f13= fA[f_mem(13,x ,dmin(y+1,YDIM),z,pitch)];
f14= fA[f_mem(14,x ,y ,z,pitch)];
f15= fA[f_mem(15,dmax(x-1) ,y ,z,pitch)];
f16= fA[f_mem(16,x ,dmax(y-1) ,z,pitch)];
f17= fA[f_mem(17,dmin(x+1,XDIM),y ,z,pitch)];
f18= fA[f_mem(18,x ,dmin(y+1,YDIM),z,pitch)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch)] = f14;
fB[f_mem(10,x,y,z,pitch)] = f17;
fB[f_mem(11,x,y,z,pitch)] = f18;
fB[f_mem(12,x,y,z,pitch)] = f15;
fB[f_mem(13,x,y,z,pitch)] = f16;
fB[f_mem(14,x,y,z,pitch)] = f9 ;
fB[f_mem(15,x,y,z,pitch)] = f12;
fB[f_mem(16,x,y,z,pitch)] = f13;
fB[f_mem(17,x,y,z,pitch)] = f10;
fB[f_mem(18,x,y,z,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch)];
f10= fA[f_mem(10,x,y-1,z,pitch)];
f11= fA[f_mem(11,x,y-1,z,pitch)];
f12= fA[f_mem(12,x,y-1,z,pitch)];
f13= fA[f_mem(13,x,y-1,z,pitch)];
f14= fA[f_mem(14,x,y-1,z,pitch)];
f15= fA[f_mem(15,x,y-1,z,pitch)];
f16= fA[f_mem(16,x,y-1,z,pitch)];
f17= fA[f_mem(17,x,y-1,z,pitch)];
f18= fA[f_mem(18,x,y-1,z,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch)];
f10= fA[f_mem(10,x,y+1,z,pitch)];
f11= fA[f_mem(11,x,y+1,z,pitch)];
f12= fA[f_mem(12,x,y+1,z,pitch)];
f13= fA[f_mem(13,x,y+1,z,pitch)];
f14= fA[f_mem(14,x,y+1,z,pitch)];
f15= fA[f_mem(15,x,y+1,z,pitch)];
f16= fA[f_mem(16,x,y+1,z,pitch)];
f17= fA[f_mem(17,x,y+1,z,pitch)];
f18= fA[f_mem(18,x,y+1,z,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch)] = f9 ;
fB[f_mem(10,x,y,z,pitch)] = f10;
fB[f_mem(11,x,y,z,pitch)] = f11;
fB[f_mem(12,x,y,z,pitch)] = f12;
fB[f_mem(13,x,y,z,pitch)] = f13;
fB[f_mem(14,x,y,z,pitch)] = f14;
fB[f_mem(15,x,y,z,pitch)] = f15;
fB[f_mem(16,x,y,z,pitch)] = f16;
fB[f_mem(17,x,y,z,pitch)] = f17;
fB[f_mem(18,x,y,z,pitch)] = f18;
}
// }
}
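//update the bottom buffer plane (g) of this GPU's slab: in-plane populations stream within gA,
//downward-moving populations (14-18) are pulled from interior plane z=0 of f, and upward-moving
//populations (9-13) are pulled from temp, which holds the neighboring GPU's top buffer.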
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*ZDIM/2);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,dmax(x-1) ,y ,pitch)];
f3 = gA [buff_mem(3 ,dmin(x+1,XDIM),y ,pitch)];
f2 = gA [buff_mem(2 ,x ,dmax(y-1) ,pitch)];
f5 = gA [buff_mem(5 ,dmax(x-1) ,dmax(y-1) ,pitch)];
f6 = gA [buff_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,pitch)];
f4 = gA [buff_mem(4 ,x ,dmin(y+1,YDIM),pitch)];
f7 = gA [buff_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),pitch)];
f8 = gA [buff_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,dmax(x-1) ,y ,pitch)];
f11= temp[buff_mem(11,x ,dmax(y-1) ,pitch)];
f12= temp[buff_mem(12,dmin(x+1,XDIM),y ,pitch)];
f13= temp[buff_mem(13,x ,dmin(y+1,YDIM),pitch)];
f14= f [f_mem (14,x ,y ,0,pitch)];
f15= f [f_mem (15,dmax(x-1) ,y ,0,pitch)];
f16= f [f_mem (16,x ,dmax(y-1) ,0,pitch)];
f17= f [f_mem (17,dmin(x+1,XDIM),y ,0,pitch)];
f18= f [f_mem (18,x ,dmin(y+1,YDIM),0,pitch)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*ZDIM/2,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
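//update the top buffer plane (h) of this GPU's slab: in-plane populations stream within hA,
//upward-moving populations (9-13) are pulled from the last interior plane of f (z=ZDIM/2-2-1),
//and downward-moving populations (14-18) are pulled from temp, filled from the neighboring GPU's bottom buffer.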
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*ZDIM/2-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,dmax(x-1) ,y ,pitch)];
f3 = hA [buff_mem(3 ,dmin(x+1,XDIM),y ,pitch)];
f2 = hA [buff_mem(2 ,x ,dmax(y-1) ,pitch)];
f5 = hA [buff_mem(5 ,dmax(x-1) ,dmax(y-1) ,pitch)];
f6 = hA [buff_mem(6 ,dmin(x+1,XDIM),dmax(y-1) ,pitch)];
f4 = hA [buff_mem(4 ,x ,dmin(y+1,YDIM),pitch)];
f7 = hA [buff_mem(7 ,dmin(x+1,XDIM),dmin(y+1,YDIM),pitch)];
f8 = hA [buff_mem(8 ,dmax(x-1) ,dmin(y+1,YDIM),pitch)];
f9 = f [f_mem (9 ,x ,y ,ZDIM/2-2-1,pitch)];
f10= f [f_mem (10,dmax(x-1) ,y ,ZDIM/2-2-1,pitch)];
f11= f [f_mem (11,x ,dmax(y-1) ,ZDIM/2-2-1,pitch)];
f12= f [f_mem (12,dmin(x+1,XDIM),y ,ZDIM/2-2-1,pitch)];
f13= f [f_mem (13,x ,dmin(y+1,YDIM),ZDIM/2-2-1,pitch)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,dmax(x-1) ,y ,pitch)];
f16= temp[buff_mem(16,x ,dmax(y-1) ,pitch)];
f17= temp[buff_mem(17,dmin(x+1,XDIM),y ,pitch)];
f18= temp[buff_mem(18,x ,dmin(y+1,YDIM),pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*ZDIM/2-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
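//read a float through the PTX ld.global.cg path (cached in L2 only, bypassing L1)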
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
__global__ void initialize_single(float *f, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
// if(im == 10 || im == 1){
// u = 0.0f;
// v = 0.0f;
// w = 0.0f;
// }
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
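//initialize all 19 populations to equilibrium values built from (rho,u,v,w);
//the BGK branch uses the standard D3Q19 form w_i*(rho + 3 e_i.u + 4.5 (e_i.u)^2 - 1.5 usqr)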
if(MODEL == "BGK"){
//each direction occupies (ZDIM/2-2) = zInner z-planes in the per-GPU interior array
f[j+0 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*YDIM*(ZDIM/2-2)]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*YDIM*(ZDIM/2-2)]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*YDIM*(ZDIM/2-2)]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f[j+0 *pitch*YDIM*(ZDIM/2-2)]=f0 ;
f[j+1 *pitch*YDIM*(ZDIM/2-2)]=f1 ;
f[j+2 *pitch*YDIM*(ZDIM/2-2)]=f2 ;
f[j+3 *pitch*YDIM*(ZDIM/2-2)]=f3 ;
f[j+4 *pitch*YDIM*(ZDIM/2-2)]=f4 ;
f[j+5 *pitch*YDIM*(ZDIM/2-2)]=f5 ;
f[j+6 *pitch*YDIM*(ZDIM/2-2)]=f6 ;
f[j+7 *pitch*YDIM*(ZDIM/2-2)]=f7 ;
f[j+8 *pitch*YDIM*(ZDIM/2-2)]=f8 ;
f[j+9 *pitch*YDIM*(ZDIM/2-2)]=f9 ;
f[j+10*pitch*YDIM*(ZDIM/2-2)]=f10;
f[j+11*pitch*YDIM*(ZDIM/2-2)]=f11;
f[j+12*pitch*YDIM*(ZDIM/2-2)]=f12;
f[j+13*pitch*YDIM*(ZDIM/2-2)]=f13;
f[j+14*pitch*YDIM*(ZDIM/2-2)]=f14;
f[j+15*pitch*YDIM*(ZDIM/2-2)]=f15;
f[j+16*pitch*YDIM*(ZDIM/2-2)]=f16;
f[j+17*pitch*YDIM*(ZDIM/2-2)]=f17;
f[j+18*pitch*YDIM*(ZDIM/2-2)]=f18;
}
if(x == XDIM-1){
for(int i = XDIM; i<pitch; i++){
j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
f[j+0 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+1 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+2 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+3 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+4 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+5 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+6 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+7 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+8 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+9 *pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+10*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+11*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+12*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+13*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+14*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+15*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+16*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+17*pitch*YDIM*(ZDIM/2-2)]=0.f;
f[j+18*pitch*YDIM*(ZDIM/2-2)]=0.f;
}
}
}
__global__ void initialize_buffer(float *g, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*YDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
g[j+0 *pitch*YDIM]=f0 ;
g[j+1 *pitch*YDIM]=f1 ;
g[j+2 *pitch*YDIM]=f2 ;
g[j+3 *pitch*YDIM]=f3 ;
g[j+4 *pitch*YDIM]=f4 ;
g[j+5 *pitch*YDIM]=f5 ;
g[j+6 *pitch*YDIM]=f6 ;
g[j+7 *pitch*YDIM]=f7 ;
g[j+8 *pitch*YDIM]=f8 ;
g[j+9 *pitch*YDIM]=f9 ;
g[j+10*pitch*YDIM]=f10;
g[j+11*pitch*YDIM]=f11;
g[j+12*pitch*YDIM]=f12;
g[j+13*pitch*YDIM]=f13;
g[j+14*pitch*YDIM]=f14;
g[j+15*pitch*YDIM]=f15;
g[j+16*pitch*YDIM]=f16;
g[j+17*pitch*YDIM]=f17;
g[j+18*pitch*YDIM]=f18;
}
}
int main(int argc, char *argv[])
{
int GPU_N;
cudaGetDeviceCount(&GPU_N);
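//override the detected device count: this code assumes exactly two GPUs (fixed two-way slab decomposition in z)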
GPU_N = 2;
cout<<"number of GPUs: "<<GPU_N<<endl;
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
//size_t memsize, memsize2;
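//row pitch: round XDIM up to the next power of two (in elements), then convert to bytes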
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = pitch*sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, n, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
cout<<"omega : "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
int zInner = ZDIM/GPU_N-2;
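//each GPU owns zInner interior z-planes in f plus two single-plane buffers: g at the bottom and h at the top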
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ;
n = nBlocks*B;
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
cudaSetDevice(0);
cudaStream_t stream[3];
cudaStreamCreate(&stream[0]);
cudaStreamCreate(&stream[1]);
cudaStreamCreate(&stream[2]);
cudaDeviceEnablePeerAccess(1,0);
//buffers g,h
float *gA_h;
float *gA_d ,*gB_d;
float *gA_temp_d;
float *hA_h;
float *hA_d ,*hB_d;
float *hA_temp_d;
//image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d;
float *FX_h,*FY_h,*FZ_h,*FX_d,*FY_d,*FZ_d;
gA_h = (float *)malloc(XDIM*YDIM*19*sizeof(float));
hA_h = (float *)malloc(XDIM*YDIM*19*sizeof(float));
fA_h = (float *)malloc(XDIM*YDIM*zInner*sizeof(float)*19);//(float *)malloc(memsize *19);
FX_h = (float *)malloc(TMAX*sizeof(float));
FY_h = (float *)malloc(TMAX*sizeof(float));
FZ_h = (float *)malloc(TMAX*sizeof(float));
// cudaMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// cudaMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// cudaMallocPitch((void **) &gA_d, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &gB_d, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &hA_d, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &hB_d, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &gA_temp_d, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &hA_temp_d, &pitch, XDIM*sizeof(float), YDIM*19);
cudaMalloc((void **) &fA_d , pitch*YDIM*19*sizeof(float)*zInner);
cudaMalloc((void **) &fB_d , pitch*YDIM*19*sizeof(float)*zInner);
cudaMalloc((void **) &gA_d , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &gB_d , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &hA_d , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &hB_d , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &gA_temp_d, pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &hA_temp_d, pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &FX_d, TMAX*sizeof(float));
cudaMalloc((void **) &FY_d, TMAX*sizeof(float));
cudaMalloc((void **) &FZ_d, TMAX*sizeof(float));
size_t pitch_elements = pitch/sizeof(float);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
//initialize host f
for (i = 0; i < XDIM*YDIM*zInner*19; i++)//loop over exactly the allocated host array size
{
fA_h[i] = 0;
}
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++)
{
gA_h[i] = 0;
hA_h[i] = 0;
}
//initialize host FX
for (i = 0; i < TMAX; i++){
FX_h[i] = 0.f;
FY_h[i] = 0.f;
FZ_h[i] = 0.f;
}
//memcpy FX
cudaMemcpy(FX_d, FX_h, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(FY_d, FY_h, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(FZ_d, FZ_h, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy2D(fA_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(fB_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(gA_d,pitch ,gA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(gB_d,pitch ,gA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(hA_d,pitch ,hA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(hB_d,pitch ,hA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaSetDevice(1);
cudaDeviceEnablePeerAccess(0,0);
cudaStream_t stream2[3];
cudaStreamCreate(&stream2[0]);
cudaStreamCreate(&stream2[1]);
cudaStreamCreate(&stream2[2]);
float *gA_d2,*gB_d2;
float *gA_h2;
float *gA_temp_d2;
float *hA_d2,*hB_d2;
float *hA_h2;
float *hA_temp_d2;
float *fA_h2,*fA_d2,*fB_d2;
float *FX_h2,*FY_h2,*FZ_h2,*FX_d2,*FY_d2,*FZ_d2;
gA_h2 = (float *)malloc(XDIM*YDIM*19*sizeof(float));
hA_h2 = (float *)malloc(XDIM*YDIM*19*sizeof(float));
fA_h2 = (float *)malloc(XDIM*YDIM*zInner*sizeof(float)*19);//(float *)malloc(memsize*19);
FX_h2 = (float *)malloc(TMAX*sizeof(float));
FY_h2 = (float *)malloc(TMAX*sizeof(float));
FZ_h2 = (float *)malloc(TMAX*sizeof(float));
// cudaMallocPitch((void **) &fA_d2, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// cudaMallocPitch((void **) &fB_d2, &pitch, XDIM*sizeof(float), YDIM*zInner*19);
// cudaMallocPitch((void **) &gA_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &gB_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &hA_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &hB_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &gA_temp_d2, &pitch, XDIM*sizeof(float), YDIM*19);
// cudaMallocPitch((void **) &hA_temp_d2, &pitch, XDIM*sizeof(float), YDIM*19);
cudaMalloc((void **) &fA_d2 , pitch*YDIM*19*sizeof(float)*zInner);
cudaMalloc((void **) &fB_d2 , pitch*YDIM*19*sizeof(float)*zInner);
cudaMalloc((void **) &gA_d2 , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &gB_d2 , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &hA_d2 , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &hB_d2 , pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &gA_temp_d2, pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &hA_temp_d2, pitch*YDIM*19*sizeof(float));
cudaMalloc((void **) &FX_d2, TMAX*sizeof(float));
cudaMalloc((void **) &FY_d2, TMAX*sizeof(float));
cudaMalloc((void **) &FZ_d2, TMAX*sizeof(float));
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
{
fA_h2[i] = 0;
}
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++)
{
gA_h2[i] = 0;
hA_h2[i] = 0;
}
for (i = 0; i < TMAX; i++){
FX_h2[i] = 0.f;
FY_h2[i] = 0.f;
FZ_h2[i] = 0.f;
}
cudaMemcpy(FX_d2, FX_h2, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(FY_d2, FY_h2, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(FZ_d2, FZ_h2, TMAX*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy2D(fA_d2,pitch ,fA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(fB_d2,pitch ,fA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(gA_d2,pitch ,gA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(gB_d2,pitch ,gA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(hA_d2,pitch ,hA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(hB_d2,pitch ,hA_h2,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*19,cudaMemcpyHostToDevice);
cudaSetDevice(0);
initialize_single<<<grid, threads>>>(fA_d,pitch_elements);
initialize_single<<<grid, threads>>>(fB_d,pitch_elements);
initialize_buffer<<<g_grid, threads>>>(gA_d,pitch_elements);
initialize_buffer<<<h_grid, threads>>>(hA_d,pitch_elements);
initialize_buffer<<<g_grid, threads>>>(gB_d,pitch_elements);
initialize_buffer<<<h_grid, threads>>>(hB_d,pitch_elements);
cudaSetDevice(1);
initialize_single<<<grid, threads>>>(fA_d2,pitch_elements);
initialize_single<<<grid, threads>>>(fB_d2,pitch_elements);
initialize_buffer<<<g_grid, threads>>>(gA_d2,pitch_elements);
initialize_buffer<<<h_grid, threads>>>(hA_d2,pitch_elements);
initialize_buffer<<<g_grid, threads>>>(gB_d2,pitch_elements);
initialize_buffer<<<h_grid, threads>>>(hB_d2,pitch_elements);
cudaSetDevice(0);
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t+=2){
//memcpy from dev0 to dev1
//cudaMemcpyPeerAsync(*dst,dstdev#,*src,srcdev#,size_t(bytes),cudastream#);
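//only the z-crossing populations are exchanged: directions 14-18 (-z) start at plane offset 14*pitch*YDIM
//and directions 9-13 (+z) at 9*pitch*YDIM; each transfer moves 5 distribution planes between the buffers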
cudaSetDevice(0);
//send g from dev1 to h of dev0
cudaMemcpyPeerAsync(&hA_temp_d[pitch_elements*YDIM*14],0,&gA_d2[pitch_elements*YDIM*14],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send g from dev0 to h of dev1
cudaMemcpyPeerAsync(&hA_temp_d2[pitch_elements*YDIM*14],1,&gA_d[pitch_elements*YDIM*14],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
//send h from dev1 to g of dev0
cudaMemcpyPeerAsync(&gA_temp_d[pitch_elements*YDIM*9],0,&hA_d2[pitch_elements*YDIM*9],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send h from dev0 to g of dev1
cudaMemcpyPeerAsync(&gA_temp_d2[pitch_elements*YDIM*9],1,&hA_d[pitch_elements*YDIM*9],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
cudaSetDevice(0);
update_inner <<< grid, threads, 0, stream[1]>>> (fA_d,fB_d,gA_d,hA_d,omega,pitch_elements,0);
update_top <<<h_grid, threads, 0, stream[0]>>>(hA_d,hB_d,fA_d,hA_temp_d,omega,pitch_elements,0);
update_bottom<<<h_grid, threads, 0, stream[0]>>>(gA_d,gB_d,fA_d,gA_temp_d,omega,pitch_elements,0);
cudaSetDevice(1);
update_inner <<< grid, threads, 0, stream2[1]>>> (fA_d2,fB_d2,gA_d2,hA_d2,omega,pitch_elements,1);
update_top <<<h_grid, threads, 0, stream2[0]>>>(hA_d2,hB_d2,fA_d2,hA_temp_d2,omega,pitch_elements,1);
update_bottom<<<h_grid, threads, 0, stream2[0]>>>(gA_d2,gB_d2,fA_d2,gA_temp_d2,omega,pitch_elements,1);
cudaDeviceSynchronize();
cudaSetDevice(0);
//send g from dev1 to h of dev0
cudaMemcpyPeerAsync(&hA_temp_d[pitch_elements*YDIM*14],0,&gB_d2[pitch_elements*YDIM*14],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send g from dev0 to h of dev1
cudaMemcpyPeerAsync(&hA_temp_d2[pitch_elements*YDIM*14],1,&gB_d[pitch_elements*YDIM*14],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
//send h from dev1 to g of dev0
cudaMemcpyPeerAsync(&gA_temp_d[pitch_elements*YDIM*9],0,&hB_d2[pitch_elements*YDIM*9],1,pitch_elements*YDIM*sizeof(float)*5,stream[0]);
//send h from dev0 to g of dev1
cudaMemcpyPeerAsync(&gA_temp_d2[pitch_elements*YDIM*9],1,&hB_d[pitch_elements*YDIM*9],0,pitch_elements*YDIM*sizeof(float)*5,stream2[0]);
cudaSetDevice(0);
update_inner <<< grid, threads, 0, stream[1]>>> (fB_d,fA_d,gB_d,hB_d,omega,pitch_elements,0);
update_top <<<h_grid, threads, 0, stream[0]>>>(hB_d,hA_d,fB_d,hA_temp_d,omega,pitch_elements,0);
update_bottom<<<h_grid, threads, 0, stream[0]>>>(gB_d,gA_d,fB_d,gA_temp_d,omega,pitch_elements,0);
cudaSetDevice(1);
update_inner <<< grid, threads, 0, stream2[1]>>> (fB_d2,fA_d2,gB_d2,hB_d2,omega,pitch_elements,1);
update_top <<<h_grid, threads, 0, stream2[0]>>>(hB_d2,hA_d2,fB_d2,hA_temp_d2,omega,pitch_elements,1);
update_bottom<<<h_grid, threads, 0, stream2[0]>>>(gB_d2,gA_d2,fB_d2,gA_temp_d2,omega,pitch_elements,1);
cudaDeviceSynchronize();
// if(METHOD == "SINGLE"){
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t,uAv_d,vAv_d,ufluc_d,vfluc_d,0);
// else
// mrt_d_single<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,0);
//
//
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1,uAv_d,vAv_d,ufluc_d,vfluc_d,0);
// else
// mrt_d_single<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,0);
// }
//
// cudaSetDevice(1);
//
// if(METHOD == "SINGLE"){
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fA_d2,fB_d2,omega,pitch_elements,FX_d2,FY_d2,FZ_d2,t,uAv_d2,vAv_d2,ufluc_d2,vfluc_d2,1);
// else
// mrt_d_single<<<grid, threads>>>(fA_d2,fB_d2,omega,pitch_elements,1);
//
//
// if(t >= STARTF)
// mrt_d_single_force<<<grid, threads>>>(fB_d2,fA_d2,omega,pitch_elements,FX_d2,FY_d2,FZ_d2,t+1,uAv_d2,vAv_d2,ufluc_d2,vfluc_d2,1);
// else
// mrt_d_single<<<grid, threads>>>(fB_d2,fA_d2,omega,pitch_elements,1);
// }
// if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
cudaDeviceSynchronize();
cudaSetDevice(0);
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)";
cout<<endl;
cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl;
cudaMemcpy2D(fA_h,XDIM*sizeof(float),fB_d,pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(gA_h,XDIM*sizeof(float),gB_d,pitch,XDIM*sizeof(float),YDIM*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(hA_h,XDIM*sizeof(float),hB_d,pitch,XDIM*sizeof(float),YDIM*19,cudaMemcpyDeviceToHost);
cudaMemcpy(FX_h, FX_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(FY_h, FY_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(FZ_h, FZ_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(1);
cudaMemcpy2D(fA_h2,XDIM*sizeof(float),fB_d2,pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(gA_h2,XDIM*sizeof(float),gB_d2,pitch,XDIM*sizeof(float),YDIM*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(hA_h2,XDIM*sizeof(float),hB_d2,pitch,XDIM*sizeof(float),YDIM*19,cudaMemcpyDeviceToHost);
cudaMemcpy(FX_h2, FX_d2, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(FY_h2, FY_d2, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(FZ_h2, FZ_d2, TMAX*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(0);
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;//, usqr;
//int j;
int check = 0;
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = gA_h[i+XDIM*YDIM*0 ];
f1 = gA_h[i+XDIM*YDIM*1 ];
f2 = gA_h[i+XDIM*YDIM*2 ];
f3 = gA_h[i+XDIM*YDIM*3 ];
f4 = gA_h[i+XDIM*YDIM*4 ];
f5 = gA_h[i+XDIM*YDIM*5 ];
f6 = gA_h[i+XDIM*YDIM*6 ];
f7 = gA_h[i+XDIM*YDIM*7 ];
f8 = gA_h[i+XDIM*YDIM*8 ];
f9 = gA_h[i+XDIM*YDIM*9 ];
f10= gA_h[i+XDIM*YDIM*10];
f11= gA_h[i+XDIM*YDIM*11];
f12= gA_h[i+XDIM*YDIM*12];
f13= gA_h[i+XDIM*YDIM*13];
f14= gA_h[i+XDIM*YDIM*14];
f15= gA_h[i+XDIM*YDIM*15];
f16= gA_h[i+XDIM*YDIM*16];
f17= gA_h[i+XDIM*YDIM*17];
f18= gA_h[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
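//m1, m9, m11, m13, m14, m15 are non-equilibrium (MRT-style) moments used to assemble the
//second-moment tensor PI; Smag below is the resulting strain-rate magnitude, computed here
//for reference but not written to the output line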
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<0<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
for(dep = 0; dep<zInner; dep++){
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = dep*XDIM*YDIM+row*XDIM+col;
f0 = fA_h[i+XDIM*YDIM*zInner*0 ];
f1 = fA_h[i+XDIM*YDIM*zInner*1 ];
f2 = fA_h[i+XDIM*YDIM*zInner*2 ];
f3 = fA_h[i+XDIM*YDIM*zInner*3 ];
f4 = fA_h[i+XDIM*YDIM*zInner*4 ];
f5 = fA_h[i+XDIM*YDIM*zInner*5 ];
f6 = fA_h[i+XDIM*YDIM*zInner*6 ];
f7 = fA_h[i+XDIM*YDIM*zInner*7 ];
f8 = fA_h[i+XDIM*YDIM*zInner*8 ];
f9 = fA_h[i+XDIM*YDIM*zInner*9 ];
f10= fA_h[i+XDIM*YDIM*zInner*10];
f11= fA_h[i+XDIM*YDIM*zInner*11];
f12= fA_h[i+XDIM*YDIM*zInner*12];
f13= fA_h[i+XDIM*YDIM*zInner*13];
f14= fA_h[i+XDIM*YDIM*zInner*14];
f15= fA_h[i+XDIM*YDIM*zInner*15];
f16= fA_h[i+XDIM*YDIM*zInner*16];
f17= fA_h[i+XDIM*YDIM*zInner*17];
f18= fA_h[i+XDIM*YDIM*zInner*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<dep+1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
if(!(rho>-0.001f && rho<2.f)) check = 1;//flag non-physical density values
}
}
}
//top of GPU1
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = hA_h[i+XDIM*YDIM*0 ];
f1 = hA_h[i+XDIM*YDIM*1 ];
f2 = hA_h[i+XDIM*YDIM*2 ];
f3 = hA_h[i+XDIM*YDIM*3 ];
f4 = hA_h[i+XDIM*YDIM*4 ];
f5 = hA_h[i+XDIM*YDIM*5 ];
f6 = hA_h[i+XDIM*YDIM*6 ];
f7 = hA_h[i+XDIM*YDIM*7 ];
f8 = hA_h[i+XDIM*YDIM*8 ];
f9 = hA_h[i+XDIM*YDIM*9 ];
f10= hA_h[i+XDIM*YDIM*10];
f11= hA_h[i+XDIM*YDIM*11];
f12= hA_h[i+XDIM*YDIM*12];
f13= hA_h[i+XDIM*YDIM*13];
f14= hA_h[i+XDIM*YDIM*14];
f15= hA_h[i+XDIM*YDIM*15];
f16= hA_h[i+XDIM*YDIM*16];
f17= hA_h[i+XDIM*YDIM*17];
f18= hA_h[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM/2-1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
//bottom of GPU2
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = gA_h2[i+XDIM*YDIM*0 ];
f1 = gA_h2[i+XDIM*YDIM*1 ];
f2 = gA_h2[i+XDIM*YDIM*2 ];
f3 = gA_h2[i+XDIM*YDIM*3 ];
f4 = gA_h2[i+XDIM*YDIM*4 ];
f5 = gA_h2[i+XDIM*YDIM*5 ];
f6 = gA_h2[i+XDIM*YDIM*6 ];
f7 = gA_h2[i+XDIM*YDIM*7 ];
f8 = gA_h2[i+XDIM*YDIM*8 ];
f9 = gA_h2[i+XDIM*YDIM*9 ];
f10= gA_h2[i+XDIM*YDIM*10];
f11= gA_h2[i+XDIM*YDIM*11];
f12= gA_h2[i+XDIM*YDIM*12];
f13= gA_h2[i+XDIM*YDIM*13];
f14= gA_h2[i+XDIM*YDIM*14];
f15= gA_h2[i+XDIM*YDIM*15];
f16= gA_h2[i+XDIM*YDIM*16];
f17= gA_h2[i+XDIM*YDIM*17];
f18= gA_h2[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM/2<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
for(dep = 0; dep<zInner; dep++){
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = dep*XDIM*YDIM+row*XDIM+col;
f0 = fA_h2[i+XDIM*YDIM*zInner*0 ];
f1 = fA_h2[i+XDIM*YDIM*zInner*1 ];
f2 = fA_h2[i+XDIM*YDIM*zInner*2 ];
f3 = fA_h2[i+XDIM*YDIM*zInner*3 ];
f4 = fA_h2[i+XDIM*YDIM*zInner*4 ];
f5 = fA_h2[i+XDIM*YDIM*zInner*5 ];
f6 = fA_h2[i+XDIM*YDIM*zInner*6 ];
f7 = fA_h2[i+XDIM*YDIM*zInner*7 ];
f8 = fA_h2[i+XDIM*YDIM*zInner*8 ];
f9 = fA_h2[i+XDIM*YDIM*zInner*9 ];
f10= fA_h2[i+XDIM*YDIM*zInner*10];
f11= fA_h2[i+XDIM*YDIM*zInner*11];
f12= fA_h2[i+XDIM*YDIM*zInner*12];
f13= fA_h2[i+XDIM*YDIM*zInner*13];
f14= fA_h2[i+XDIM*YDIM*zInner*14];
f15= fA_h2[i+XDIM*YDIM*zInner*15];
f16= fA_h2[i+XDIM*YDIM*zInner*16];
f17= fA_h2[i+XDIM*YDIM*zInner*17];
f18= fA_h2[i+XDIM*YDIM*zInner*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM/2+dep+1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h2[i]<<","<<vAv_h2[i]<<", "<<ufluc_h2[i]<<","<<vfluc_h2[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
if(!(rho>0.f && rho<2.f)) check = 1;//flag non-physical density values
}
}
}
//top of GPU2
for(row = 0; row<YDIM; row++){
for(col = 0; col<XDIM; col++){
i = row*XDIM+col;
f0 = hA_h2[i+XDIM*YDIM*0 ];
f1 = hA_h2[i+XDIM*YDIM*1 ];
f2 = hA_h2[i+XDIM*YDIM*2 ];
f3 = hA_h2[i+XDIM*YDIM*3 ];
f4 = hA_h2[i+XDIM*YDIM*4 ];
f5 = hA_h2[i+XDIM*YDIM*5 ];
f6 = hA_h2[i+XDIM*YDIM*6 ];
f7 = hA_h2[i+XDIM*YDIM*7 ];
f8 = hA_h2[i+XDIM*YDIM*8 ];
f9 = hA_h2[i+XDIM*YDIM*9 ];
f10= hA_h2[i+XDIM*YDIM*10];
f11= hA_h2[i+XDIM*YDIM*11];
f12= hA_h2[i+XDIM*YDIM*12];
f13= hA_h2[i+XDIM*YDIM*13];
f14= hA_h2[i+XDIM*YDIM*14];
f15= hA_h2[i+XDIM*YDIM*15];
f16= hA_h2[i+XDIM*YDIM*16];
f17= hA_h2[i+XDIM*YDIM*17];
f18= hA_h2[i+XDIM*YDIM*18];
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<col<<", "<<row<<", "<<ZDIM-1<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}
}
if(check == 1) cout<<"error: density out of expected range in output data"<<endl;
output.close();
//for(int t = STARTF-1; t<TMAX; t++){
for(int t = 0; t<TMAX; t++){
output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", "
<<FY_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", "
<<FZ_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<endl;
// output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", "
// <<FY_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", "
// <<FZ_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<endl;
}
output2.close();
//cudaFree(image_d);
cudaFree(fA_d);
cudaFree(fB_d);
return(0);
}
|
e0c9a663e533386eca0b07f034d82c11cb0fe714.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "NeighborListGPUBinned.cuh"
#include "hoomd/TextureTools.h"
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for O(N) neighbor list generation on the GPU
*/
//! Texture for reading d_cell_xyzf
scalar4_tex_t cell_xyzf_1d_tex;
//! Warp-centric scan (Kepler and later)
template<int NT>
struct warp_scan_sm30
{
__device__ static int Scan(int tid, unsigned char x, unsigned char* total)
{
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
int first = laneid - tid;
#pragma unroll
for(int offset = 1; offset < NT; offset += offset)
{
int y = __shfl(x,(first + tid - offset) &(WARP_SIZE -1));
if(tid >= offset) x += y;
}
// all threads get the total from the last thread in the cta
*total = __shfl(x,first + NT - 1);
// shift by one (exclusive scan)
int y = __shfl(x,(first + tid - 1) &(WARP_SIZE-1));
x = tid ? y : 0;
return x;
}
};
//! Kernel call for generating neighbor list on the GPU (Kepler optimized version)
/*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering.
\param d_nlist Neighbor list data structure to write
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions at this update are written to this array
\param d_conditions Conditions array for writing overflow condition
\param d_Nmax Maximum number of neighbors per type
\param d_head_list List of indexes to access \a d_nlist
\param d_pos Particle positions
\param d_body Particle body indices
\param d_diameter Particle diameters
\param N Number of particles
\param d_cell_size Number of particles in each cell
\param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type)
\param d_cell_tdb Cell contents (tdb array from CellList with type, diameter, and body)
\param d_cell_adj Cell adjacency list
\param ci Cell indexer for indexing cells
\param cli Cell list indexer for indexing into d_cell_xyzf
\param cadji Adjacent cell indexer listing the 27 neighboring cells
\param box Simulation box dimensions
\param d_r_cut Cutoff radius stored by pair type r_cut(i,j)
\param r_buff The maximum radius for which to include particles as neighbors
\param ntypes Number of particle types
\param ghost_width Width of ghost cell layer
\note optimized for Kepler
*/
template<unsigned char flags, int threads_per_particle>
__global__ void gpu_compute_nlist_binned_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
// each set of threads_per_particle threads is going to compute the neighbor list for a single particle
int my_pidx;
if (gridDim.y > 1)
{
// fermi workaround
my_pidx = (blockIdx.x + blockIdx.y*65535) * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
else
{
my_pidx = blockIdx.x * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
// one thread per particle
if (my_pidx >= N) return;
Scalar4 my_postype = d_pos[my_pidx];
Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z);
unsigned int my_type = __scalar_as_int(my_postype.w);
unsigned int my_body = d_body[my_pidx];
Scalar my_diam = d_diameter[my_pidx];
unsigned int my_head = d_head_list[my_pidx];
Scalar3 f = box.makeFraction(my_pos, ghost_width);
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
uchar3 periodic = box.getPeriodic();
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
int my_cell = ci(ib,jb,kb);
// index of current neighbor
unsigned int cur_adj = 0;
// current cell
unsigned int neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
// size of current cell
unsigned int neigh_size = d_cell_size[neigh_cell];
// current index in cell
int cur_offset = threadIdx.x % threads_per_particle;
bool done = false;
// total number of neighbors
unsigned int nneigh = 0;
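// loop over the adjacent cells cooperatively: each of the threads_per_particle threads examines
// every threads_per_particle-th candidate in the current cell, and the warp scan below compacts
// the accepted neighbors into contiguous slots of d_nlist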
while (! done)
{
// initialize with default
unsigned int neighbor;
unsigned char has_neighbor = 0;
// advance neighbor cell
while (cur_offset >= neigh_size && !done )
{
cur_offset -= neigh_size;
cur_adj++;
if (cur_adj < cadji.getW())
{
neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
neigh_size = d_cell_size[neigh_cell];
}
else
// we are past the end of the cell neighbors
done = true;
}
// if the first thread in the cta has no work, terminate the loop
if (done && !(threadIdx.x % threads_per_particle)) break;
if (!done)
{
Scalar4 cur_xyzf = texFetchScalar4(d_cell_xyzf, cell_xyzf_1d_tex, cli(cur_offset, neigh_cell));
Scalar4 cur_tdb = d_cell_tdb[cli(cur_offset, neigh_cell)];
// advance cur_offset
cur_offset += threads_per_particle;
unsigned int neigh_type = __scalar_as_int(cur_tdb.x);
// Only do the hard work if the particle should be included by r_cut(i,j)
Scalar r_list = s_r_list[typpair_idx(my_type,neigh_type)];
if (r_list > Scalar(0.0))
{
Scalar neigh_diam = cur_tdb.y;
unsigned int neigh_body = __scalar_as_int(cur_tdb.z);
Scalar3 neigh_pos = make_scalar3(cur_xyzf.x,
cur_xyzf.y,
cur_xyzf.z);
int cur_neigh = __scalar_as_int(cur_xyzf.w);
// compute the distance between the two particles
Scalar3 dx = my_pos - neigh_pos;
// wrap the periodic boundary conditions
dx = box.minImage(dx);
// compute dr squared
Scalar drsq = dot(dx,dx);
bool excluded = (my_pidx == cur_neigh);
if (filter_body && my_body != 0xffffffff)
excluded = excluded | (my_body == neigh_body);
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (my_diam + neigh_diam) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_list) * delta;
}
// store result in shared memory
if (drsq <= (r_list*r_list + sqshift) && !excluded)
{
neighbor = cur_neigh;
has_neighbor = 1;
}
}
}
// no syncthreads here, we assume threads_per_particle < warp size
// scan over flags
int k = 0;
#if (__CUDA_ARCH__ >= 300)
unsigned char n = 1;
k = warp_scan_sm30<threads_per_particle>::Scan(threadIdx.x % threads_per_particle, has_neighbor, &n);
#endif
if (has_neighbor && (nneigh + k) < s_Nmax[my_type])
d_nlist[my_head + nneigh + k] = neighbor;
// increment total neighbor count
#if (__CUDA_ARCH__ >= 300)
nneigh += n;
#else
if (has_neighbor)
nneigh++;
#endif
} // end while
if (threadIdx.x % threads_per_particle == 0)
{
// flag if we need to grow the neighbor list
if (nneigh >= s_Nmax[my_type])
atomicMax(&d_conditions[my_type], nneigh);
d_n_neigh[my_pidx] = nneigh;
d_last_updated_pos[my_pidx] = my_postype;
}
}
//! determine maximum possible block size
template<typename T>
int get_max_block_size(T func)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)func);
int max_threads = attr.maxThreadsPerBlock;
// number of threads has to be multiple of warp size
max_threads -= max_threads % max_threads_per_particle;
return max_threads;
}
void gpu_nlist_binned_bind_texture(const Scalar4 *d_cell_xyzf, unsigned int n_elements)
{
// bind the position texture
cell_xyzf_1d_tex.normalized = false;
cell_xyzf_1d_tex.filterMode = hipFilterModePoint;
hipBindTexture(0, cell_xyzf_1d_tex, d_cell_xyzf, sizeof(Scalar4)*n_elements);
}
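// this binding is only performed for devices with compute capability < 35 (see the launcher
// below); on newer hardware texFetchScalar4 presumably reads d_cell_xyzf directly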
//! recursive template to launch neighborlist with given template parameters
/* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */
template<int cur_tpp>
inline void launcher(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{
// shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + sizeof(unsigned int)*ntypes;
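// e.g. for ntypes = 2 with single-precision Scalar this is 4*4 + 4*2 = 24 bytes:
// ntypes*ntypes r_list entries plus one Nmax entry per type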
if (tpp == cur_tpp && cur_tpp != 0)
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
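// pre-sm30 devices limit grid.x to 65535 blocks, so the launch is folded into a 2D grid here
// and unfolded again inside the kernel from blockIdx.x + blockIdx.y*65535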
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<0,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<1,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<2,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
hipLaunchKernelGGL(( gpu_compute_nlist_binned_kernel<3,cur_tpp>), dim3(grid), dim3(block_size),shared_size, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
}
else
{
launcher<cur_tpp/2>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
tpp,
filter_body,
diameter_shift,
block_size
);
}
}
//! template specialization to terminate recursion
template<>
inline void launcher<min_threads_per_particle/2>(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{ }
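// the empty specialization above terminates the compile-time recursion: launcher is
// instantiated for every power-of-two threads-per-particle value from
// max_threads_per_particle down to min_threads_per_particle, and only the instantiation
// whose cur_tpp equals the runtime tpp actually launches a kernel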
hipError_t gpu_compute_nlist_binned(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const BoxDim& box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const unsigned int threads_per_particle,
const unsigned int block_size,
bool filter_body,
bool diameter_shift,
const Scalar3& ghost_width,
const unsigned int compute_capability)
{
launcher<max_threads_per_particle>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
threads_per_particle,
filter_body,
diameter_shift,
block_size
);
return hipSuccess;
}
| e0c9a663e533386eca0b07f034d82c11cb0fe714.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "NeighborListGPUBinned.cuh"
#include "hoomd/TextureTools.h"
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for O(N) neighbor list generation on the GPU
*/
//! Texture for reading d_cell_xyzf
scalar4_tex_t cell_xyzf_1d_tex;
//! Warp-centric scan (Kepler and later)
template<int NT>
struct warp_scan_sm30
{
__device__ static int Scan(int tid, unsigned char x, unsigned char* total)
{
unsigned int laneid;
//This command gets the lane ID within the current warp
asm("mov.u32 %0, %%laneid;" : "=r"(laneid));
int first = laneid - tid;
#pragma unroll
for(int offset = 1; offset < NT; offset += offset)
{
int y = __shfl(x,(first + tid - offset) &(WARP_SIZE -1));
if(tid >= offset) x += y;
}
// all threads get the total from the last thread in the cta
*total = __shfl(x,first + NT - 1);
// shift by one (exclusive scan)
int y = __shfl(x,(first + tid - 1) &(WARP_SIZE-1));
x = tid ? y : 0;
return x;
}
};
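// example for NT = 4: flags (1,0,1,1) scan to (0,1,1,2) with *total = 3, giving the flagged
// threads the distinct offsets 0, 1 and 2; the laneid/first arithmetic lets an NT-wide group
// start at any lane of the 32-thread warp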
//! Kernel call for generating neighbor list on the GPU (Kepler optimized version)
/*! \tparam flags Set bit 1 to enable body filtering. Set bit 2 to enable diameter filtering.
\param d_nlist Neighbor list data structure to write
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions at this update are written to this array
\param d_conditions Conditions array for writing overflow condition
\param d_Nmax Maximum number of neighbors per type
\param d_head_list List of indexes to access \a d_nlist
\param d_pos Particle positions
\param d_body Particle body indices
\param d_diameter Particle diameters
\param N Number of particles
\param d_cell_size Number of particles in each cell
\param d_cell_xyzf Cell contents (xyzf array from CellList with flag=type)
\param d_cell_tdb Cell contents (tdb array from CellList with type, diameter, and body)
\param d_cell_adj Cell adjacency list
\param ci Cell indexer for indexing cells
\param cli Cell list indexer for indexing into d_cell_xyzf
\param cadji Adjacent cell indexer listing the 27 neighboring cells
\param box Simulation box dimensions
\param d_r_cut Cutoff radius stored by pair type r_cut(i,j)
\param r_buff The maximum radius for which to include particles as neighbors
\param ntypes Number of particle types
\param ghost_width Width of ghost cell layer
\note optimized for Kepler
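\note e.g. flags = 3 enables both body filtering and diameter shifting, matching the
gpu_compute_nlist_binned_kernel<3,...> instantiation selected by the launcher below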
*/
template<unsigned char flags, int threads_per_particle>
__global__ void gpu_compute_nlist_binned_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width)
{
bool filter_body = flags & 1;
bool diameter_shift = flags & 2;
// cache the r_listsq parameters into shared memory
Index2D typpair_idx(ntypes);
const unsigned int num_typ_parameters = typpair_idx.getNumElements();
// shared data for per type pair parameters
extern __shared__ unsigned char s_data[];
// pointer for the r_listsq data
Scalar *s_r_list = (Scalar *)(&s_data[0]);
unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
// load in the per type pair r_list
for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < num_typ_parameters)
{
Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
// force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
}
if (cur_offset + threadIdx.x < ntypes)
{
s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
}
}
__syncthreads();
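// from here on cutoffs and capacities are read from shared memory only; the extern shared
// allocation is sized on the host as sizeof(Scalar)*ntypes*ntypes + sizeof(unsigned int)*ntypes
// to hold exactly these two arrays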
// each set of threads_per_particle threads is going to compute the neighbor list for a single particle
int my_pidx;
if (gridDim.y > 1)
{
// fermi workaround
my_pidx = (blockIdx.x + blockIdx.y*65535) * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
else
{
my_pidx = blockIdx.x * (blockDim.x/threads_per_particle) + threadIdx.x/threads_per_particle;
}
// one thread per particle
if (my_pidx >= N) return;
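// each particle is handled by a group of threads_per_particle consecutive threads, so a
// block covers blockDim.x / threads_per_particle particles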
Scalar4 my_postype = d_pos[my_pidx];
Scalar3 my_pos = make_scalar3(my_postype.x, my_postype.y, my_postype.z);
unsigned int my_type = __scalar_as_int(my_postype.w);
unsigned int my_body = d_body[my_pidx];
Scalar my_diam = d_diameter[my_pidx];
unsigned int my_head = d_head_list[my_pidx];
Scalar3 f = box.makeFraction(my_pos, ghost_width);
// find the bin each particle belongs in
int ib = (int)(f.x * ci.getW());
int jb = (int)(f.y * ci.getH());
int kb = (int)(f.z * ci.getD());
uchar3 periodic = box.getPeriodic();
// need to handle the case where the particle is exactly at the box hi
if (ib == ci.getW() && periodic.x)
ib = 0;
if (jb == ci.getH() && periodic.y)
jb = 0;
if (kb == ci.getD() && periodic.z)
kb = 0;
int my_cell = ci(ib,jb,kb);
// index of current neighbor
unsigned int cur_adj = 0;
// current cell
unsigned int neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
// size of current cell
unsigned int neigh_size = d_cell_size[neigh_cell];
// current index in cell
int cur_offset = threadIdx.x % threads_per_particle;
bool done = false;
// total number of neighbors
unsigned int nneigh = 0;
while (! done)
{
// initialize with default values
unsigned int neighbor;
unsigned char has_neighbor = 0;
// advance neighbor cell
while (cur_offset >= neigh_size && !done )
{
cur_offset -= neigh_size;
cur_adj++;
if (cur_adj < cadji.getW())
{
neigh_cell = d_cell_adj[cadji(cur_adj, my_cell)];
neigh_size = d_cell_size[neigh_cell];
}
else
// we are past the end of the cell neighbors
done = true;
}
// if the first thread in the cta has no work, terminate the loop
if (done && !(threadIdx.x % threads_per_particle)) break;
if (!done)
{
Scalar4 cur_xyzf = texFetchScalar4(d_cell_xyzf, cell_xyzf_1d_tex, cli(cur_offset, neigh_cell));
Scalar4 cur_tdb = d_cell_tdb[cli(cur_offset, neigh_cell)];
// advance cur_offset
cur_offset += threads_per_particle;
unsigned int neigh_type = __scalar_as_int(cur_tdb.x);
// Only do the hard work if the particle should be included by r_cut(i,j)
Scalar r_list = s_r_list[typpair_idx(my_type,neigh_type)];
if (r_list > Scalar(0.0))
{
Scalar neigh_diam = cur_tdb.y;
unsigned int neigh_body = __scalar_as_int(cur_tdb.z);
Scalar3 neigh_pos = make_scalar3(cur_xyzf.x,
cur_xyzf.y,
cur_xyzf.z);
int cur_neigh = __scalar_as_int(cur_xyzf.w);
// compute the distance between the two particles
Scalar3 dx = my_pos - neigh_pos;
// wrap the periodic boundary conditions
dx = box.minImage(dx);
// compute dr squared
Scalar drsq = dot(dx,dx);
bool excluded = (my_pidx == cur_neigh);
if (filter_body && my_body != 0xffffffff)
excluded = excluded | (my_body == neigh_body);
Scalar sqshift = Scalar(0.0);
if (diameter_shift)
{
const Scalar delta = (my_diam + neigh_diam) * Scalar(0.5) - Scalar(1.0);
// r^2 < (r_list + delta)^2
// r^2 < r_listsq + delta^2 + 2*r_list*delta
sqshift = (delta + Scalar(2.0) * r_list) * delta;
}
// record the neighbor if it is within the (shifted) cutoff and not excluded
if (drsq <= (r_list*r_list + sqshift) && !excluded)
{
neighbor = cur_neigh;
has_neighbor = 1;
}
}
}
// no syncthreads here, we assume threads_per_particle < warp size
// scan over flags
int k = 0;
#if (__CUDA_ARCH__ >= 300)
unsigned char n = 1;
k = warp_scan_sm30<threads_per_particle>::Scan(threadIdx.x % threads_per_particle, has_neighbor, &n);
#endif
if (has_neighbor && (nneigh + k) < s_Nmax[my_type])
d_nlist[my_head + nneigh + k] = neighbor;
// increment total neighbor count
#if (__CUDA_ARCH__ >= 300)
nneigh += n;
#else
if (has_neighbor)
nneigh++;
#endif
} // end while
if (threadIdx.x % threads_per_particle == 0)
{
// flag if we need to grow the neighbor list
if (nneigh >= s_Nmax[my_type])
atomicMax(&d_conditions[my_type], nneigh);
d_n_neigh[my_pidx] = nneigh;
d_last_updated_pos[my_pidx] = my_postype;
}
}
//! determine maximum possible block size
template<typename T>
int get_max_block_size(T func)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)func);
int max_threads = attr.maxThreadsPerBlock;
// number of threads has to be a multiple of max_threads_per_particle
max_threads -= max_threads % max_threads_per_particle;
return max_threads;
}
void gpu_nlist_binned_bind_texture(const Scalar4 *d_cell_xyzf, unsigned int n_elements)
{
// bind the position texture
cell_xyzf_1d_tex.normalized = false;
cell_xyzf_1d_tex.filterMode = cudaFilterModePoint;
cudaBindTexture(0, cell_xyzf_1d_tex, d_cell_xyzf, sizeof(Scalar4)*n_elements);
}
//! recursive template to launch neighborlist with given template parameters
/* \tparam cur_tpp Number of threads per particle (assumed to be power of two) */
template<int cur_tpp>
inline void launcher(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{
// shared memory = r_listsq + Nmax + stuff needed for neighborlist (computed below)
Index2D typpair_idx(ntypes);
unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + sizeof(unsigned int)*ntypes;
if (tpp == cur_tpp && cur_tpp != 0)
{
if (!diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<0,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<0,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (!diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<1,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<1,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && !filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<2,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<2,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
else if (diameter_shift && filter_body)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
max_block_size = get_max_block_size(gpu_compute_nlist_binned_kernel<3,cur_tpp>);
if (compute_capability < 35) gpu_nlist_binned_bind_texture(d_cell_xyzf, cli.getNumElements());
block_size = block_size < max_block_size ? block_size : max_block_size;
dim3 grid(N / (block_size/tpp) + 1);
if (compute_capability < 30 && grid.x > 65535)
{
grid.y = grid.x/65535 + 1;
grid.x = 65535;
}
gpu_compute_nlist_binned_kernel<3,cur_tpp><<<grid, block_size,shared_size>>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width);
}
}
else
{
launcher<cur_tpp/2>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
tpp,
filter_body,
diameter_shift,
block_size
);
}
}
//! template specialization to terminate recursion
template<>
inline void launcher<min_threads_per_particle/2>(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D ci,
const Index2D cli,
const Index2D cadji,
const BoxDim box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const Scalar3 ghost_width,
const unsigned int compute_capability,
unsigned int tpp,
bool filter_body,
bool diameter_shift,
unsigned int block_size)
{ }
cudaError_t gpu_compute_nlist_binned(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const unsigned int *d_Nmax,
const unsigned int *d_head_list,
const Scalar4 *d_pos,
const unsigned int *d_body,
const Scalar *d_diameter,
const unsigned int N,
const unsigned int *d_cell_size,
const Scalar4 *d_cell_xyzf,
const Scalar4 *d_cell_tdb,
const unsigned int *d_cell_adj,
const Index3D& ci,
const Index2D& cli,
const Index2D& cadji,
const BoxDim& box,
const Scalar *d_r_cut,
const Scalar r_buff,
const unsigned int ntypes,
const unsigned int threads_per_particle,
const unsigned int block_size,
bool filter_body,
bool diameter_shift,
const Scalar3& ghost_width,
const unsigned int compute_capability)
{
launcher<max_threads_per_particle>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
d_Nmax,
d_head_list,
d_pos,
d_body,
d_diameter,
N,
d_cell_size,
d_cell_xyzf,
d_cell_tdb,
d_cell_adj,
ci,
cli,
cadji,
box,
d_r_cut,
r_buff,
ntypes,
ghost_width,
compute_capability,
threads_per_particle,
filter_body,
diameter_shift,
block_size
);
return cudaSuccess;
}
|
0b5f52f520a2007111ffa16cf377fe7eebeadf5f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// https://forums.developer.nvidia.com/t/any-way-to-know-on-which-sm-a-thread-is-running/19974/15
/* E.D. Riedijk */
__device__ uint get_smid(void) {
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret) );
return ret;
}
__global__ void kern(int *sm){
if (threadIdx.x==0)
sm[blockIdx.x]=get_smid();
}
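// thread 0 of each block records the ID of the streaming multiprocessor the block ran on;
// with 100 blocks and far fewer SMs on typical GPUs, the printout below shows how the blocks
// were distributed across the SMs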
int main(){
int N = 100;
int *sm, *sm_d;
sm = (int *) malloc(N*sizeof(*sm));
hipMalloc((void**)&sm_d,N*sizeof(*sm_d));
hipLaunchKernelGGL(( kern), dim3(N),dim3(N), 0, 0, sm_d);
hipMemcpy(sm, sm_d, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0;i<N;i++)
printf("%d %d\n",i,sm[i]);
return 0;
} | 0b5f52f520a2007111ffa16cf377fe7eebeadf5f.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// https://forums.developer.nvidia.com/t/any-way-to-know-on-which-sm-a-thread-is-running/19974/15
/* E.D. Riedijk */
__device__ uint get_smid(void) {
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret) );
return ret;
}
__global__ void kern(int *sm){
if (threadIdx.x==0)
sm[blockIdx.x]=get_smid();
}
int main(){
int N = 100;
int *sm, *sm_d;
sm = (int *) malloc(N*sizeof(*sm));
cudaMalloc((void**)&sm_d,N*sizeof(*sm_d));
kern<<<N,N>>>( sm_d);
cudaMemcpy(sm, sm_d, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0;i<N;i++)
printf("%d %d\n",i,sm[i]);
return 0;
} |
ebb506f23386ac3f9f41b729dbdbea095dc217df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2014 richards-tech
//
// This file is part of RTGPULib
//
// RTGPULib is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// RTGPULib is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with RTGPULib. If not, see <http://www.gnu.org/licenses/>.
//
//
#include "RTGPUDefs.h"
#define NTH 32
texture<unsigned char, 2> gpTexGray; // the gray char texture
texture<uchar4, 2> gpTexRGB; // the RGB texture
__global__ void kernelMorphOps3x3Gray(unsigned char *pOutput, int nW, int nMode)
{
unsigned char *pOut = pOutput + blockIdx.x * nW;
int nPix = (nW + NTH - 1)/NTH;
int iStart = nPix * threadIdx.x;
int iStop = iStart + nPix;
unsigned char minval, maxval;
unsigned char px00, px01, px02, px10, px11, px12, px20, px21, px22;
if (iStop > nW)
iStop = nW;
px00 = tex2D( gpTexGray, (float) (iStart-1), (float) (blockIdx.x-1)); // preset pixel values
px01 = tex2D( gpTexGray, (float) (iStart-1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexGray, (float) (iStart-1), (float) (blockIdx.x+1));
px10 = tex2D( gpTexGray, (float) (iStart), (float) (blockIdx.x-1));
px11 = tex2D( gpTexGray, (float) (iStart), (float) (blockIdx.x-0));
px12 = tex2D( gpTexGray, (float) (iStart), (float) (blockIdx.x+1));
px20 = tex2D( gpTexGray, (float) (iStart+1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexGray, (float) (iStart+1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexGray, (float) (iStart+1), (float) (blockIdx.x+1));
minval = 255;
maxval = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX(px00, maxval);
RTGPU_MAX(px01, maxval);
RTGPU_MAX(px02, maxval);
RTGPU_MAX(px10, maxval);
RTGPU_MAX(px11, maxval);
RTGPU_MAX(px12, maxval);
RTGPU_MAX(px20, maxval);
RTGPU_MAX(px21, maxval);
RTGPU_MAX(px22, maxval);
pOut[iStart] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN(px00, minval);
RTGPU_MIN(px01, minval);
RTGPU_MIN(px02, minval);
RTGPU_MIN(px10, minval);
RTGPU_MIN(px11, minval);
RTGPU_MIN(px12, minval);
RTGPU_MIN(px20, minval);
RTGPU_MIN(px21, minval);
RTGPU_MIN(px22, minval);
pOut[iStart] = minval;
break;
}
for ( int i = iStart+1; i < iStop; i++ )
{
// replace correct set of column pixels
switch (i % 3)
{
case 0:
px00 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-1)); // load new values
px01 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 1:
px10 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-1));
px11 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-0));
px12 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 2:
px20 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x+1));
break;
}
minval = 255;
maxval = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX(px00, maxval);
RTGPU_MAX(px01, maxval);
RTGPU_MAX(px02, maxval);
RTGPU_MAX(px10, maxval);
RTGPU_MAX(px11, maxval);
RTGPU_MAX(px12, maxval);
RTGPU_MAX(px20, maxval);
RTGPU_MAX(px21, maxval);
RTGPU_MAX(px22, maxval);
pOut[i] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN(px00, minval);
RTGPU_MIN(px01, minval);
RTGPU_MIN(px02, minval);
RTGPU_MIN(px10, minval);
RTGPU_MIN(px11, minval);
RTGPU_MIN(px12, minval);
RTGPU_MIN(px20, minval);
RTGPU_MIN(px21, minval);
RTGPU_MIN(px22, minval);
pOut[i] = minval;
break;
}
}
}
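// one block processes one image row and each of the NTH threads a contiguous span of roughly
// nW/NTH pixels; the switch on i % 3 recycles one cached column of the 3x3 window per step
// instead of re-fetching all nine texels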
__global__ void kernelMorphOps3x3RGB(uchar4 *pOutput, int nW, int nMode)
{
uchar4 *pOut = pOutput + blockIdx.x * nW;
int nPix = (nW + NTH - 1)/NTH;
int iStart = nPix * threadIdx.x;
int iStop = iStart + nPix;
uchar4 minval, maxval;
uchar4 px00, px01, px02, px10, px11, px12, px20, px21, px22;
if (iStop > nW)
iStop = nW;
px00 = tex2D( gpTexRGB, (float) (iStart-1), (float) (blockIdx.x-1)); // preset pixel values
px01 = tex2D( gpTexRGB, (float) (iStart-1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexRGB, (float) (iStart-1), (float) (blockIdx.x+1));
px10 = tex2D( gpTexRGB, (float) (iStart), (float) (blockIdx.x-1));
px11 = tex2D( gpTexRGB, (float) (iStart), (float) (blockIdx.x-0));
px12 = tex2D( gpTexRGB, (float) (iStart), (float) (blockIdx.x+1));
px20 = tex2D( gpTexRGB, (float) (iStart+1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexRGB, (float) (iStart+1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexRGB, (float) (iStart+1), (float) (blockIdx.x+1));
minval.x = minval.y = minval.z = minval.w = 255;
maxval.x = maxval.y = maxval.z = maxval.w = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX4(px00, maxval);
RTGPU_MAX4(px01, maxval);
RTGPU_MAX4(px02, maxval);
RTGPU_MAX4(px10, maxval);
RTGPU_MAX4(px11, maxval);
RTGPU_MAX4(px12, maxval);
RTGPU_MAX4(px20, maxval);
RTGPU_MAX4(px21, maxval);
RTGPU_MAX4(px22, maxval);
pOut[iStart] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN4(px00, minval);
RTGPU_MIN4(px01, minval);
RTGPU_MIN4(px02, minval);
RTGPU_MIN4(px10, minval);
RTGPU_MIN4(px11, minval);
RTGPU_MIN4(px12, minval);
RTGPU_MIN4(px20, minval);
RTGPU_MIN4(px21, minval);
RTGPU_MIN4(px22, minval);
pOut[iStart] = minval;
break;
}
for ( int i = iStart+1; i < iStop; i++ )
{
// replace correct set of column pixels
switch (i % 3)
{
case 0:
px00 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-1)); // load new values
px01 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 1:
px10 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-1));
px11 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-0));
px12 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 2:
px20 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x+1));
break;
}
minval.x = minval.y = minval.z = minval.w = 255;
maxval.x = maxval.y = maxval.z = maxval.w = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX4(px00, maxval);
RTGPU_MAX4(px01, maxval);
RTGPU_MAX4(px02, maxval);
RTGPU_MAX4(px10, maxval);
RTGPU_MAX4(px11, maxval);
RTGPU_MAX4(px12, maxval);
RTGPU_MAX4(px20, maxval);
RTGPU_MAX4(px21, maxval);
RTGPU_MAX4(px22, maxval);
pOut[i] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN4(px00, minval);
RTGPU_MIN4(px01, minval);
RTGPU_MIN4(px02, minval);
RTGPU_MIN4(px10, minval);
RTGPU_MIN4(px11, minval);
RTGPU_MIN4(px12, minval);
RTGPU_MIN4(px20, minval);
RTGPU_MIN4(px21, minval);
RTGPU_MIN4(px22, minval);
pOut[i] = minval;
break;
}
}
}
extern "C" int _RTGPUMorphOps3x3(int srcSlot, int destSlot, int mode)
{
RTGPU_IMAGE *SI, *DI;
RTGPUTrace("RTGPUMorphOps3x3");
hipChannelFormatDesc desc;
RTGPU_SLOTPTR(srcSlot, SI);
RTGPU_SLOTPTR(destSlot, DI);
_RTGPUSetupSlot(DI, SI->width, SI->height, SI->color);
if (!SI->color) {
desc = hipCreateChannelDesc<unsigned char>();
RTGPUSafeCall(hipBindTexture2D(NULL, gpTexGray, SI->image, desc, SI->width, SI->height, SI->width));
hipLaunchKernelGGL(( kernelMorphOps3x3Gray), dim3(SI->height), dim3(NTH), 0, 0, (unsigned char *)DI->image, SI->width, mode);
RTGPUSafeCall(hipUnbindTexture(gpTexGray));
} else {
desc = hipCreateChannelDesc<uchar4>();
RTGPUSafeCall(hipBindTexture2D(NULL, gpTexRGB, SI->image, desc, SI->width, SI->height, SI->width * 4));
hipLaunchKernelGGL(( kernelMorphOps3x3RGB), dim3(SI->height), dim3(NTH), 0, 0, (uchar4 *)DI->image, SI->width, mode);
RTGPUSafeCall(hipUnbindTexture(gpTexRGB));
}
return 1;
}
| ebb506f23386ac3f9f41b729dbdbea095dc217df.cu | //
// Copyright (c) 2014 richards-tech
//
// This file is part of RTGPULib
//
// RTGPULib is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// RTGPULib is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with RTGPULib. If not, see <http://www.gnu.org/licenses/>.
//
//
#include "RTGPUDefs.h"
#define NTH 32
texture<unsigned char, 2> gpTexGray; // the gray char texture
texture<uchar4, 2> gpTexRGB; // the RGB texture
__global__ void kernelMorphOps3x3Gray(unsigned char *pOutput, int nW, int nMode)
{
unsigned char *pOut = pOutput + blockIdx.x * nW;
int nPix = (nW + NTH - 1)/NTH;
int iStart = nPix * threadIdx.x;
int iStop = iStart + nPix;
unsigned char minval, maxval;
unsigned char px00, px01, px02, px10, px11, px12, px20, px21, px22;
if (iStop > nW)
iStop = nW;
px00 = tex2D( gpTexGray, (float) (iStart-1), (float) (blockIdx.x-1)); // preset pixel values
px01 = tex2D( gpTexGray, (float) (iStart-1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexGray, (float) (iStart-1), (float) (blockIdx.x+1));
px10 = tex2D( gpTexGray, (float) (iStart), (float) (blockIdx.x-1));
px11 = tex2D( gpTexGray, (float) (iStart), (float) (blockIdx.x-0));
px12 = tex2D( gpTexGray, (float) (iStart), (float) (blockIdx.x+1));
px20 = tex2D( gpTexGray, (float) (iStart+1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexGray, (float) (iStart+1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexGray, (float) (iStart+1), (float) (blockIdx.x+1));
minval = 255;
maxval = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX(px00, maxval);
RTGPU_MAX(px01, maxval);
RTGPU_MAX(px02, maxval);
RTGPU_MAX(px10, maxval);
RTGPU_MAX(px11, maxval);
RTGPU_MAX(px12, maxval);
RTGPU_MAX(px20, maxval);
RTGPU_MAX(px21, maxval);
RTGPU_MAX(px22, maxval);
pOut[iStart] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN(px00, minval);
RTGPU_MIN(px01, minval);
RTGPU_MIN(px02, minval);
RTGPU_MIN(px10, minval);
RTGPU_MIN(px11, minval);
RTGPU_MIN(px12, minval);
RTGPU_MIN(px20, minval);
RTGPU_MIN(px21, minval);
RTGPU_MIN(px22, minval);
pOut[iStart] = minval;
break;
}
for ( int i = iStart+1; i < iStop; i++ )
{
// replace correct set of column pixels
switch (i % 3)
{
case 0:
px00 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-1)); // load new values
px01 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 1:
px10 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-1));
px11 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-0));
px12 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 2:
px20 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexGray, (float) (i + 1), (float) (blockIdx.x+1));
break;
}
minval = 255;
maxval = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX(px00, maxval);
RTGPU_MAX(px01, maxval);
RTGPU_MAX(px02, maxval);
RTGPU_MAX(px10, maxval);
RTGPU_MAX(px11, maxval);
RTGPU_MAX(px12, maxval);
RTGPU_MAX(px20, maxval);
RTGPU_MAX(px21, maxval);
RTGPU_MAX(px22, maxval);
pOut[i] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN(px00, minval);
RTGPU_MIN(px01, minval);
RTGPU_MIN(px02, minval);
RTGPU_MIN(px10, minval);
RTGPU_MIN(px11, minval);
RTGPU_MIN(px12, minval);
RTGPU_MIN(px20, minval);
RTGPU_MIN(px21, minval);
RTGPU_MIN(px22, minval);
pOut[i] = minval;
break;
}
}
}
__global__ void kernelMorphOps3x3RGB(uchar4 *pOutput, int nW, int nMode)
{
uchar4 *pOut = pOutput + blockIdx.x * nW;
int nPix = (nW + NTH - 1)/NTH;
int iStart = nPix * threadIdx.x;
int iStop = iStart + nPix;
uchar4 minval, maxval;
uchar4 px00, px01, px02, px10, px11, px12, px20, px21, px22;
if (iStop > nW)
iStop = nW;
px00 = tex2D( gpTexRGB, (float) (iStart-1), (float) (blockIdx.x-1)); // preset pixel values
px01 = tex2D( gpTexRGB, (float) (iStart-1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexRGB, (float) (iStart-1), (float) (blockIdx.x+1));
px10 = tex2D( gpTexRGB, (float) (iStart), (float) (blockIdx.x-1));
px11 = tex2D( gpTexRGB, (float) (iStart), (float) (blockIdx.x-0));
px12 = tex2D( gpTexRGB, (float) (iStart), (float) (blockIdx.x+1));
px20 = tex2D( gpTexRGB, (float) (iStart+1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexRGB, (float) (iStart+1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexRGB, (float) (iStart+1), (float) (blockIdx.x+1));
minval.x = minval.y = minval.z = minval.w = 255;
maxval.x = maxval.y = maxval.z = maxval.w = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX4(px00, maxval);
RTGPU_MAX4(px01, maxval);
RTGPU_MAX4(px02, maxval);
RTGPU_MAX4(px10, maxval);
RTGPU_MAX4(px11, maxval);
RTGPU_MAX4(px12, maxval);
RTGPU_MAX4(px20, maxval);
RTGPU_MAX4(px21, maxval);
RTGPU_MAX4(px22, maxval);
pOut[iStart] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN4(px00, minval);
RTGPU_MIN4(px01, minval);
RTGPU_MIN4(px02, minval);
RTGPU_MIN4(px10, minval);
RTGPU_MIN4(px11, minval);
RTGPU_MIN4(px12, minval);
RTGPU_MIN4(px20, minval);
RTGPU_MIN4(px21, minval);
RTGPU_MIN4(px22, minval);
pOut[iStart] = minval;
break;
}
for ( int i = iStart+1; i < iStop; i++ )
{
// replace correct set of column pixels
switch (i % 3)
{
case 0:
px00 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-1)); // load new values
px01 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-0));
px02 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 1:
px10 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-1));
px11 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-0));
px12 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x+1));
break;
case 2:
px20 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-1));
px21 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x-0));
px22 = tex2D( gpTexRGB, (float) (i + 1), (float) (blockIdx.x+1));
break;
}
minval.x = minval.y = minval.z = minval.w = 255;
maxval.x = maxval.y = maxval.z = maxval.w = 0;
switch (nMode)
{
case GPU_MORPHOPS_DILATE:
RTGPU_MAX4(px00, maxval);
RTGPU_MAX4(px01, maxval);
RTGPU_MAX4(px02, maxval);
RTGPU_MAX4(px10, maxval);
RTGPU_MAX4(px11, maxval);
RTGPU_MAX4(px12, maxval);
RTGPU_MAX4(px20, maxval);
RTGPU_MAX4(px21, maxval);
RTGPU_MAX4(px22, maxval);
pOut[i] = maxval;
break;
case GPU_MORPHOPS_ERODE:
RTGPU_MIN4(px00, minval);
RTGPU_MIN4(px01, minval);
RTGPU_MIN4(px02, minval);
RTGPU_MIN4(px10, minval);
RTGPU_MIN4(px11, minval);
RTGPU_MIN4(px12, minval);
RTGPU_MIN4(px20, minval);
RTGPU_MIN4(px21, minval);
RTGPU_MIN4(px22, minval);
pOut[i] = minval;
break;
}
}
}
extern "C" int _RTGPUMorphOps3x3(int srcSlot, int destSlot, int mode)
{
RTGPU_IMAGE *SI, *DI;
RTGPUTrace("RTGPUMorphOps3x3");
cudaChannelFormatDesc desc;
RTGPU_SLOTPTR(srcSlot, SI);
RTGPU_SLOTPTR(destSlot, DI);
_RTGPUSetupSlot(DI, SI->width, SI->height, SI->color);
if (!SI->color) {
desc = cudaCreateChannelDesc<unsigned char>();
RTGPUSafeCall(cudaBindTexture2D(NULL, gpTexGray, SI->image, desc, SI->width, SI->height, SI->width));
kernelMorphOps3x3Gray<<<SI->height, NTH>>>((unsigned char *)DI->image, SI->width, mode);
RTGPUSafeCall(cudaUnbindTexture(gpTexGray));
} else {
desc = cudaCreateChannelDesc<uchar4>();
RTGPUSafeCall(cudaBindTexture2D(NULL, gpTexRGB, SI->image, desc, SI->width, SI->height, SI->width * 4));
kernelMorphOps3x3RGB<<<SI->height, NTH>>>((uchar4 *)DI->image, SI->width, mode);
RTGPUSafeCall(cudaUnbindTexture(gpTexRGB));
}
return 1;
}
|
b6740b20a88894498422164f89268022de3d5444.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../includes/headers.h"
int main(int argc, char** argv){
//configurable parameters for data set
const int NUM_VERTICES = 8192;
const size_t VERTEX_BYTES = NUM_VERTICES * sizeof(int);
const int NUM_EDGES = 524288;
const size_t EDGE_BYTES = NUM_EDGES * sizeof(Edge);
const int STARTING_VERTEX = 5571;
hipError_t err = hipSuccess;
//assign thread configuration
int threadsPerBlock = 1024;
int blocks =(NUM_VERTICES + threadsPerBlock - 1) / threadsPerBlock;
//int edgeBlocks =(NUM_EDGES + threadsPerBlock - 1) / threadsPerBlock;
clock_t begin, end;
double time_spent;
int edgeCounter= 0;
//declare the two arrays on host
int h_vertices[NUM_VERTICES];
Edge h_edges[NUM_EDGES];
//read file and write into host array
FILE *infile;
const char *path = "DataSet/8192-524288.txt";
char line[100];
int first, second;
infile = fopen(path, "r");
if (!infile) {
printf("Couldn't open %s for reading\n", path);
exit(-1);
}
while (fgets(line, sizeof(line), infile)!= NULL)
{
sscanf(line, "%d\t%d", &first, &second);
h_edges[edgeCounter].first = first;
h_edges[edgeCounter].second = second;
edgeCounter++;
}
fclose(infile);
//debugging log to check that the array has been correctly written
// for (int i = 0; i < NUM_EDGES; ++i)
// {
// printf("%d -> %d", h_edges[i].first, h_edges[i].second);
// printf(((i % 4) != 3) ? "\t":"\n");
// }
//define pointers two device arrays
Edge* d_edges;
int* d_vertices;
//allocate memory on device for both arrays
err = hipMalloc((void**)&d_edges, EDGE_BYTES);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate edges array on device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&d_vertices, VERTEX_BYTES);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate vertices array on device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//copy vertices array from host to device
err = hipMemcpy(d_vertices, h_vertices, VERTEX_BYTES, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vertices array from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threadsPerBlock);
hipLaunchKernelGGL(( initialize_vertices), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_vertices, STARTING_VERTEX);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Initialization completed\n");
err = hipMemcpy(h_vertices, d_vertices, VERTEX_BYTES, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vertices array from device to kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//debugging log to check that the vertices array has been correctly initialized and copied back to host
// for (int i = 0; i < NUM_VERTICES; ++i)
// {
// printf("%d : %d", i, h_vertices[i]);
// printf(((i % 4) != 3) ? "\t":"\n");
// }
//copy host vertices and edges array to device and prepare to launch kernel
err = hipMemcpy(d_vertices, h_vertices, VERTEX_BYTES, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vertices array from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_edges, h_edges, EDGE_BYTES, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy edges array from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize depth counters
int previous_depth = 0;
int current_depth = 1;
//Allocate and initialize termination variable modified on host and device
int* d_modified;
int h_modified;
err = hipMalloc((void**)&d_modified, sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocte d_done(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
begin = clock();
do{
h_modified = 0;
err = hipMemcpy(d_modified, &h_modified, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy h_done to device(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("CUDA kernel launching with %d blocks of %d threads\n", vertexBlocks, threadsPerBlock);
hipLaunchKernelGGL(( bfs), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_edges, d_vertices, previous_depth, current_depth, d_modified);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch bfs kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(&h_modified, d_modified, sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy d_done to host(error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("BFS run for level %d\n", current_depth);
previous_depth++;
current_depth++;
}while(h_modified != 0);
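//the loop above advances one frontier level per iteration: the bfs kernel (declared in
//headers.h) is expected to set d_modified whenever it assigns current_depth to a newly
//reached vertex, and the host keeps launching levels until a full pass changes nothing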
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Time taken: %f\n", time_spent);
hipFree(d_edges);
hipFree(d_vertices);
//hipFree(d_done);
//hipFree(d_current_depth);
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
} | b6740b20a88894498422164f89268022de3d5444.cu | #include "../includes/headers.h"
int main(int argc, char** argv){
//configurable parameters for data set
const int NUM_VERTICES = 8192;
const size_t VERTEX_BYTES = NUM_VERTICES * sizeof(int);
const int NUM_EDGES = 524288;
const size_t EDGE_BYTES = NUM_EDGES * sizeof(Edge);
const int STARTING_VERTEX = 5571;
cudaError_t err = cudaSuccess;
//assign thread configuration
int threadsPerBlock = 1024;
int blocks =(NUM_VERTICES + threadsPerBlock - 1) / threadsPerBlock;
//int edgeBlocks =(NUM_EDGES + threadsPerBlock - 1) / threadsPerBlock;
clock_t begin, end;
double time_spent;
int edgeCounter= 0;
//declare the two arrays on host
int h_vertices[NUM_VERTICES];
Edge h_edges[NUM_EDGES];
//read file and write into host array
FILE *infile;
const char *path = "DataSet/8192-524288.txt";
char line[100];
int first, second;
infile = fopen(path, "r");
if (!infile) {
printf("Couldn't open %s for reading\n", path);
exit(-1);
}
while (fgets(line, sizeof(line), infile)!= NULL)
{
sscanf(line, "%d\t%d", &first, &second);
h_edges[edgeCounter].first = first;
h_edges[edgeCounter].second = second;
edgeCounter++;
}
fclose(infile);
//debugging log to check that the array has been correctly written
// for (int i = 0; i < NUM_EDGES; ++i)
// {
// printf("%d -> %d", h_edges[i].first, h_edges[i].second);
// printf(((i % 4) != 3) ? "\t":"\n");
// }
//define pointers two device arrays
Edge* d_edges;
int* d_vertices;
//allocate memory on device for both arrays
err = cudaMalloc((void**)&d_edges, EDGE_BYTES);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate edges array on device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&d_vertices, VERTEX_BYTES);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate vertices array on device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//copy vertices array from host to device
err = cudaMemcpy(d_vertices, h_vertices, VERTEX_BYTES, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vertices array from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocks, threadsPerBlock);
initialize_vertices<<<blocks, threadsPerBlock>>>(d_vertices, STARTING_VERTEX);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch initialization kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Initialization completed\n");
err = cudaMemcpy(h_vertices, d_vertices, VERTEX_BYTES, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vertices array from device to kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//debugging log to check that the vertices array has been correctly initialized and copied back to host
// for (int i = 0; i < NUM_VERTICES; ++i)
// {
// printf("%d : %d", i, h_vertices[i]);
// printf(((i % 4) != 3) ? "\t":"\n");
// }
//copy host vertices and edges array to device and prepare to launch kernel
err = cudaMemcpy(d_vertices, h_vertices, VERTEX_BYTES, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vertices array from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_edges, h_edges, EDGE_BYTES, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy edges array from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize depth counters
int previous_depth = 0;
int current_depth = 1;
//Allocate and initialize termination variable modified on host and device
int* d_modified;
int h_modified;
err = cudaMalloc((void**)&d_modified, sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocte d_done(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
begin = clock();
do{
h_modified = 0;
err = cudaMemcpy(d_modified, &h_modified, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy h_done to device(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("CUDA kernel launching with %d blocks of %d threads\n", vertexBlocks, threadsPerBlock);
bfs<<<blocks, threadsPerBlock>>>(d_edges, d_vertices, previous_depth, current_depth, d_modified);
cudaThreadSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch bfs kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(&h_modified, d_modified, sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy d_done to host(error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("BFS run for level %d\n", current_depth);
previous_depth++;
current_depth++;
}while(h_modified != 0);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("Time taken: %f\n", time_spent);
cudaFree(d_edges);
cudaFree(d_vertices);
//cudaFree(d_done);
//cudaFree(d_current_depth);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
} |
a0dac3681289f2be5ce62e09dcd73f555824255d.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
//////////////////////////////////////////////////////////////////////////
// min
namespace arithm
{
struct VMin4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin4(a, b);
}
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
struct VMin2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin2(a, b);
}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
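// vmin4/vmin2 (from simd_functions.hpp) take per-byte / per-halfword minima of two packed
// 32-bit words, e.g. vmin4(0x01050902, 0x03020304) == 0x01020302, so 8- and 16-bit images
// can be reduced four or two pixels per operation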
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
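// minMat_v4/minMat_v2 above are the packed fast paths, presumably chosen by the caller when
// image rows can be reinterpreted as arrays of uint; the templated minMat/minScalar handle
// one element per thread for every supported depth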
//////////////////////////////////////////////////////////////////////////
// max
namespace arithm
{
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
| a0dac3681289f2be5ce62e09dcd73f555824255d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
//////////////////////////////////////////////////////////////////////////
// min
namespace arithm
{
struct VMin4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin4(a, b);
}
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
struct VMin2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin2(a, b);
}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
namespace arithm
{
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
682b79de8cb7fbeb851c808cf0d32d277551ef16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(abs(ref[i]-out[i]) > 1.e-6){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
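// Tile-wise normalization: for each 16x16 tile, a single thread accumulates the weighted
// sum of the tile (weights taken from mul[]), and after the barrier every thread writes
// in/sum scaled by 2, 1, -1 or 0 depending on the parity of its global (x, y) indices
// (same scheme as the CPU reference in checkresult above).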
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
__shared__ float sum;
if(threadIdx.x==1 && threadIdx.y==1){
sum=0.0f; // initialize inside the single accumulating thread so other warps cannot overwrite its partial sums
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
}
__syncthreads();
if(tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty]/sum;
else
out[tx * width + ty] = 0.0f;
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
hipLaunchKernelGGL((norm), dim3(grid), dim3(block), 0, 0, dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
| 682b79de8cb7fbeb851c808cf0d32d277551ef16.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 160
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width){
for(int i = 0 ; i < GRID_SIZE; i++){
for(int j = 0; j < GRID_SIZE; j++){
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
sum += in[start + ii * width + jj] * mul[jj];
}
}
for(int ii = 0; ii < BLOCK_SIZE; ii++){
for(int jj = 0; jj < BLOCK_SIZE; jj++){
if(jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else if(jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii]/sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for(int i = 0; i < SIZE; i++){
if(abs(ref[i]-out[i]) > 1.e-6){
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width){
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= width || ty >= SIZE/width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
__shared__ float sum;
if(threadIdx.x==1 && threadIdx.y==1){
sum=0.0f; // initialize inside the single accumulating thread so other warps cannot overwrite its partial sums
for(int i = 0; i < BLOCK_SIZE; i++){
for(int j = 0; j < BLOCK_SIZE; j++){
sum += in[start + i * width + j] * mul[j];
}
}
}
__syncthreads();
if(tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty]/sum;
else if(tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty]/sum;
else
out[tx * width + ty] = 0.0f;
}
int main(){
float *hA_in = (float *)malloc(SIZE * sizeof(float));
float *hA_out = (float *)malloc(SIZE * sizeof(float));
float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *dA_in, *dA_out, *dB_in;
srand(2016);
for(int i = 0; i < SIZE; i++){
hA_in[i] = (float)rand()/(float)RAND_MAX;
}
for(int i = 0; i < BLOCK_SIZE; i++){
hB_in[i] = (float)rand()/(float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
struct timespec start, end;
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &start);
norm<<<grid, block>>>(dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, &end);
printf("kernel time %fs\n", end.tv_sec - start.tv_sec + (end.tv_nsec - start.tv_nsec)/1.e9);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
|
c479adfb3994e2887c60283c7eedec517760855b.hip | // !!! This is a file automatically generated by hipify!!!
#include "LinearLayer.h"
#include "CudaContext.h"
#include <iostream>
#include "Helpers.h"
using std::cout;
using std::endl;
LinearLayer::LinearLayer(int inSize, int outSize, int batchSize, int seqLength): LayerBase(inSize, outSize, batchSize, seqLength)
{
_w = std::make_unique<NeuroWeight>(outSize, inSize);
_b = std::make_unique<NeuroWeight>(outSize, 1, 1);
_output = std::make_unique<DeviceMatrix>(outSize, batchSize, seqLength);
_sensitivity = std::make_unique<DeviceMatrix>(inSize, batchSize, seqLength);
_identity = std::make_unique<DeviceMatrix>(batchSize, 1, 1);
}
void LinearLayer::TransferStatesToDevice(std::vector<WeightSyncContainer*>& states)
{
if (states.size() != 2) throw RetiaException("State vector should have the length of exactly 2");
_w->TransferStateToDevice(*states[0]);
_b->TransferStateToDevice(*states[1]);
}
void LinearLayer::TransferStatesToHost(std::vector<WeightSyncContainer*>& states)
{
if (states.size() != 2) throw RetiaException("State vector should have the length of exactly 2");
_w->TransferStateToHost(*states[0]);
_b->TransferStateToHost(*states[1]);
}
void LinearLayer::ForwardSequence(DeviceMatrix& input)
{
/*cout << "Linear input" << endl;
PrintMatrix(input);*/
_output->TileFrom(_b->weight());
_output->Accumulate(_w->weight(), input, 1.0f);
/*cout << "Linear output" << endl;
PrintMatrix(*_output);*/
}
void LinearLayer::BackpropSequence(DeviceMatrix& input, DeviceMatrix& outSens)
{
_w->gradient().ZeroMemory();
_b->gradient().ZeroMemory();
_sensitivity->ZeroMemory();
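// Accumulate gradients timestep by timestep: the weight gradient gets outSens_t * input_t^T
// and the bias gradient gets outSens_t summed over the batch via _identity (presumably a
// vector of ones); the input sensitivity W^T * outSens is then computed for the whole
// sequence in a single call below.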
for (int i = _seqLen - 1; i >= 0; --i)
{
auto curOutSens = outSens.GetSequenceElement(i);
auto curInput = input.GetSequenceElement(i);
auto curInSens = _sensitivity->GetSequenceElement(i);
_w->gradient().Accumulate(curOutSens, curInput, 1.0f, 1.0f, HIPBLAS_OP_N, HIPBLAS_OP_T);
_b->gradient().Accumulate(curOutSens, *_identity, 1.0f);
}
_sensitivity->Accumulate(_w->weight(), outSens, 0.0f, 1.0f, HIPBLAS_OP_T);
}
void LinearLayer::Optimize(OptimizerBase& optimizer)
{
optimizer.Optimize(*_w);
optimizer.Optimize(*_b);
}
void LinearLayer::ResetMemory()
{
}
void LinearLayer::ResetOptimizerCache()
{
_w->ClearCache();
_b->ClearCache();
}
| c479adfb3994e2887c60283c7eedec517760855b.cu | #include "LinearLayer.h"
#include "CudaContext.h"
#include <iostream>
#include "Helpers.h"
using std::cout;
using std::endl;
LinearLayer::LinearLayer(int inSize, int outSize, int batchSize, int seqLength): LayerBase(inSize, outSize, batchSize, seqLength)
{
_w = std::make_unique<NeuroWeight>(outSize, inSize);
_b = std::make_unique<NeuroWeight>(outSize, 1, 1);
_output = std::make_unique<DeviceMatrix>(outSize, batchSize, seqLength);
_sensitivity = std::make_unique<DeviceMatrix>(inSize, batchSize, seqLength);
_identity = std::make_unique<DeviceMatrix>(batchSize, 1, 1);
}
void LinearLayer::TransferStatesToDevice(std::vector<WeightSyncContainer*>& states)
{
if (states.size() != 2) throw RetiaException("State vector should have the length of exactly 2");
_w->TransferStateToDevice(*states[0]);
_b->TransferStateToDevice(*states[1]);
}
void LinearLayer::TransferStatesToHost(std::vector<WeightSyncContainer*>& states)
{
if (states.size() != 2) throw RetiaException("State vector should have the length of exactly 2");
_w->TransferStateToHost(*states[0]);
_b->TransferStateToHost(*states[1]);
}
void LinearLayer::ForwardSequence(DeviceMatrix& input)
{
/*cout << "Linear input" << endl;
PrintMatrix(input);*/
_output->TileFrom(_b->weight());
_output->Accumulate(_w->weight(), input, 1.0f);
/*cout << "Linear output" << endl;
PrintMatrix(*_output);*/
}
void LinearLayer::BackpropSequence(DeviceMatrix& input, DeviceMatrix& outSens)
{
_w->gradient().ZeroMemory();
_b->gradient().ZeroMemory();
_sensitivity->ZeroMemory();
for (int i = _seqLen - 1; i >= 0; --i)
{
auto curOutSens = outSens.GetSequenceElement(i);
auto curInput = input.GetSequenceElement(i);
auto curInSens = _sensitivity->GetSequenceElement(i);
_w->gradient().Accumulate(curOutSens, curInput, 1.0f, 1.0f, CUBLAS_OP_N, CUBLAS_OP_T);
_b->gradient().Accumulate(curOutSens, *_identity, 1.0f);
}
_sensitivity->Accumulate(_w->weight(), outSens, 0.0f, 1.0f, CUBLAS_OP_T);
}
void LinearLayer::Optimize(OptimizerBase& optimizer)
{
optimizer.Optimize(*_w);
optimizer.Optimize(*_b);
}
void LinearLayer::ResetMemory()
{
}
void LinearLayer::ResetOptimizerCache()
{
_w->ClearCache();
_b->ClearCache();
}
|
40e33508eb5f4b9e4698ea049714ff26656c3b3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "gauss_newton_solver.h"
#include "util.h"
#include "jacobian_util.h"
#include "device_util.h"
#include "device_array.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
/**
* Compute Jacobian matrix for parametric model w.r.t fov of virtual camera, Rotation, Translation, α, β, δ, γ
* (α, β, δ) are parametric face model Eigen basis scaling factors
* γ is illumination model (Spherical harmonics)
*
* Optimization energy
* P = (intrinsics, pose, α, β, δ, γ)
* E(P) = w_col * E_col(P) + w_lan * E_lan(P) + w_reg * E_reg(P)
*
*/
__global__ void cuComputeJacobianSparseDense(
// Shared memory
FaceBoundingBox face_bb,
const int nFeatures, const int imageWidth, const int imageHeight,
const int nFaceCoeffs, const int nPixels, const int n,
const int nShapeCoeffs, const int nExpressionCoeffs, const int nAlbedoCoeffs,
const int nUnknowns, const int nResiduals,
const int nVerticesTimes3, const int nShapeCoeffsTotal, const int nExpressionCoeffsTotal, const int nAlbedoCoeffsTotal,
const float wSparse, float wDense, const float wReg,
uchar* image,
glm::mat4 face_pose, glm::mat3 drx, glm::mat3 dry, glm::mat3 drz, glm::mat4 projection, Eigen::Matrix3f jacobian_local,
// Device memory input
int* prior_local_ids, glm::vec3* current_face, glm::vec2* sparse_features,
float* p_shape_basis,
float* p_expression_basis,
float* p_albedo_basis,
float* p_coefficients_shape,
float* p_coefficients_expression,
float* p_coefficients_albedo,
float* p_coefficients_sh,
hipTextureObject_t rgb,
hipTextureObject_t barycentrics,
hipTextureObject_t vertex_ids,
// Device memory output
float* p_jacobian, float* p_residuals)
{
int i = util::getThreadIndex1D();
if (i >= n)
{
return;
}
Eigen::Map<Eigen::MatrixXf> jacobian(p_jacobian, nResiduals, nUnknowns);
Eigen::Map<Eigen::VectorXf> residuals(p_residuals, nResiduals);
Eigen::Map<Eigen::MatrixXf> shape_basis(p_shape_basis, nVerticesTimes3, nShapeCoeffsTotal);
Eigen::Map<Eigen::MatrixXf> expression_basis(p_expression_basis, nVerticesTimes3, nExpressionCoeffsTotal);
Eigen::Map<Eigen::MatrixXf> albedo_basis(p_albedo_basis, nVerticesTimes3, nAlbedoCoeffsTotal);
// Regularization terms based on the assumption of a normally distributed population
if (i >= nFeatures + nPixels)
{
int offset_rows = nFeatures * 2 + nPixels * 3;
int offset_cols = 7;
const int current_index = i - nFeatures - nPixels;
const int expression_shift = nShapeCoeffs;
const int albedo_shift = nShapeCoeffs + nExpressionCoeffs;
float coefficient = 0.0f;
int relative_index = current_index;
// Shape
if (current_index < expression_shift)
{
coefficient = p_coefficients_shape[relative_index];
}
// Expression
else if (current_index < albedo_shift)
{
offset_rows += expression_shift;
offset_cols += expression_shift;
relative_index -= expression_shift;
coefficient = p_coefficients_expression[relative_index];
}
// Albedo
else
{
offset_rows += albedo_shift;
offset_cols += albedo_shift;
relative_index -= albedo_shift;
coefficient = p_coefficients_albedo[relative_index];
}
jacobian(offset_rows + relative_index, offset_cols + relative_index) = wReg;
residuals(offset_rows + relative_index) = coefficient * wReg;
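// With residual wReg * c_k and a constant diagonal Jacobian entry wReg, the squared-residual
// contribution is wReg^2 * sum(c_k^2), i.e. a Tikhonov / zero-mean Gaussian prior on the
// shape, expression and albedo coefficients.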
return;
}
/*
* Photo-Consistency dense energy term
* E = sum(norm_l2(C_S - C_I))
* where C_S is synthesized image
* C_I is input RGB image
*
*/
if (i >= nFeatures)
{
int offset_rows = nFeatures * 2;
const int current_index = i - nFeatures;
unsigned int xp = current_index % face_bb.width + face_bb.x_min;
unsigned int yp = current_index / face_bb.width + face_bb.y_min;
int background_index = 3 * (xp + yp * imageWidth);
int ygl = imageHeight - 1 - yp; // "height - 1 - index.y" OpenGL uses left-bottom corner as texture origin.
float4 rgb_sampled = tex2D<float4>(rgb, xp, ygl);
if (rgb_sampled.w < 1.0f) // pixel is not covered by face
{
return;
}
float4 barycentrics_sampled = tex2D<float4>(barycentrics, xp, ygl);
int4 vertex_ids_sampled = tex2D<int4>(vertex_ids, xp, ygl);
Eigen::Map<Eigen::Vector3f> face_rgb(reinterpret_cast<float*>(&rgb_sampled));
Eigen::Vector3f frame_rgb;
frame_rgb.x() = image[background_index] / 255.0f;
frame_rgb.y() = image[background_index + 1] / 255.0f;
frame_rgb.z() = image[background_index + 2] / 255.0f;
Eigen::Vector3f residual = face_rgb - frame_rgb;
// IRLS with L1 norm.
wDense /= glm::sqrt(glm::max(residual.norm(), 1.0e-8f));
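// Re-weighting by 1/sqrt(||r||) turns the squared residual wDense^2 * ||r||^2 / ||r|| into
// wDense^2 * ||r||, so iterating Gauss-Newton with this weight approximates an L1 (L2,1)
// color penalty; the 1.0e-8 floor avoids dividing by zero for already well-explained pixels.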
residuals.block(offset_rows + current_index * 3, 0, 3, 1) = residual * wDense;
/*
* Energy derivation
* dE/dC_S
* The derivative with respect to synthesized image
* Fragment shader derivation
*
* Albedo derivation
* Color = Light * Albedo
* Albedo = A(E_alb_A * β) + B(E_alb_B * β) + C(E_alb_C * β) => barycentric coordinates
* dColor/dAlbedo
*/
jacobian.block(offset_rows + current_index * 3, 7 + nShapeCoeffs + nExpressionCoeffs, 3, nAlbedoCoeffs) =
(barycentrics_sampled.w * wDense * barycentrics_sampled.x) * albedo_basis.block(3 * vertex_ids_sampled.x, 0, 3, nAlbedoCoeffs) +
(barycentrics_sampled.w * wDense * barycentrics_sampled.y) * albedo_basis.block(3 * vertex_ids_sampled.y, 0, 3, nAlbedoCoeffs) +
(barycentrics_sampled.w * wDense * barycentrics_sampled.z) * albedo_basis.block(3 * vertex_ids_sampled.z, 0, 3, nAlbedoCoeffs);
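// Note (assumption): the fourth channel of the barycentrics texture appears to hold the
// evaluated SH irradiance written by the render pass, so dColor/dβ = shading * barycentric
// mix of the albedo-basis rows of the three triangle vertices.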
/*
* Spherical harmonics derivation
* Color = SH(normal) * Albedo
* dSH/d{9 coefficients}
*
* see file /shaders/face.frag computeSH(normal)
*/
auto number_of_vertices = nVerticesTimes3 / 3;
auto albedos = current_face + number_of_vertices;
auto normals = current_face + 2 * number_of_vertices;
auto normal_a_unnorm_glm = glm::mat3(face_pose) * normals[vertex_ids_sampled.x];
auto normal_b_unnorm_glm = glm::mat3(face_pose) * normals[vertex_ids_sampled.y];
auto normal_c_unnorm_glm = glm::mat3(face_pose) * normals[vertex_ids_sampled.z];
auto normal_a_glm = glm::normalize(normal_a_unnorm_glm);
auto normal_b_glm = glm::normalize(normal_b_unnorm_glm);
auto normal_c_glm = glm::normalize(normal_c_unnorm_glm);
auto albedo_glm = barycentrics_sampled.x * albedos[vertex_ids_sampled.x] + barycentrics_sampled.y * albedos[vertex_ids_sampled.y] + barycentrics_sampled.z * albedos[vertex_ids_sampled.z];
auto normal_unnorm_glm = barycentrics_sampled.x * normal_a_glm + barycentrics_sampled.y * normal_b_glm + barycentrics_sampled.z * normal_c_glm;
auto normal_glm = glm::normalize(normal_unnorm_glm);
Eigen::Vector3f albedo;
albedo << albedo_glm.x, albedo_glm.y, albedo_glm.z;
// dSH/d{9 coefficients}
Eigen::Matrix<float, 1, 9> bands(9);
bands(0, 0) = 1.0f;
bands(0, 1) = normal_glm.y;
bands(0, 2) = normal_glm.z;
bands(0, 3) = normal_glm.x;
bands(0, 4) = normal_glm.x * normal_glm.y;
bands(0, 5) = normal_glm.y * normal_glm.z;
bands(0, 6) = 3.0f * normal_glm.z * normal_glm.z - 1.0f;
bands(0, 7) = normal_glm.x * normal_glm.z;
bands(0, 8) = normal_glm.x * normal_glm.x - normal_glm.y * normal_glm.y;
jacobian.block<3, 9>(offset_rows + current_index * 3, 7 + nShapeCoeffs + nExpressionCoeffs + nAlbedoCoeffs) = wDense * albedo * bands;
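// Because the shading is linear in the SH coefficients, light(n) = sum_k γ_k * b_k(n),
// the derivative w.r.t. each γ_k is simply albedo * b_k(n); bands holds the second-order
// SH basis monomials evaluated at the normal (normalization constants presumably folded
// into γ, matching computeSH in the fragment shader referenced above).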
/*
* Expression and shape derivations
* Triangle = (A, B, C)
* Normal{X, Y, Z} = normalize(cross(B - A, C - A))
* Color = SH(Normal) * Albedo
* @ref https://www.lighthouse3d.com/tutorials/glsl-12-tutorial/normalization-issues/
*
* dColor/dα and dColor/dδ
*
* Example of chain rule:
* dColor/dα = dColor/dSH * dSH/dNormal * dNormal/dNormalize() * dNormalize/dCross() * dCross()/d{(B - A), (C - A), A}
*/
Eigen::Matrix<float, 1, 3> dlight_dnormal;
jacobian_util::computeDLightDNormal(dlight_dnormal, normal_glm, p_coefficients_sh);
Eigen::Matrix<float, 3, 3> dnormal_dunnormnormal;
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_unnorm_glm);
Eigen::Matrix<float, 3, 3> unnormnormal_jacobian = albedo * dlight_dnormal * dnormal_dunnormnormal;
Eigen::Matrix<float, 3, 3> v0_jacobian;
Eigen::Matrix<float, 3, 3> v1_jacobian;
Eigen::Matrix<float, 3, 3> v2_jacobian;
jacobian_util::computeNormalJacobian(v0_jacobian, v1_jacobian, v2_jacobian,
current_face[vertex_ids_sampled.x], current_face[vertex_ids_sampled.y], current_face[vertex_ids_sampled.z]);
unnormnormal_jacobian = wDense * unnormnormal_jacobian * jacobian_local;
v0_jacobian = unnormnormal_jacobian * v0_jacobian;
v1_jacobian = unnormnormal_jacobian * v1_jacobian;
v2_jacobian = unnormnormal_jacobian * v2_jacobian;
// dColor/dα
jacobian.block(offset_rows + current_index * 3, 7, 3, nShapeCoeffs) =
v0_jacobian * shape_basis.block(3 * vertex_ids_sampled.x, 0, 3, nShapeCoeffs) +
v1_jacobian * shape_basis.block(3 * vertex_ids_sampled.y, 0, 3, nShapeCoeffs) +
v2_jacobian * shape_basis.block(3 * vertex_ids_sampled.z, 0, 3, nShapeCoeffs);
// dColor/dδ
jacobian.block(offset_rows + current_index * 3, 7 + nShapeCoeffs, 3, nExpressionCoeffs) =
v0_jacobian * expression_basis.block(3 * vertex_ids_sampled.x, 0, 3, nExpressionCoeffs) +
v1_jacobian * expression_basis.block(3 * vertex_ids_sampled.y, 0, 3, nExpressionCoeffs) +
v2_jacobian * expression_basis.block(3 * vertex_ids_sampled.z, 0, 3, nExpressionCoeffs);
Eigen::Matrix<float, 3, 3> dnormal_dunnormnormal_sum = Eigen::MatrixXf::Zero(3, 3);
// For 1st vertex normal
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_a_unnorm_glm);
dnormal_dunnormnormal_sum += barycentrics_sampled.x * dnormal_dunnormnormal;
// For 2nd vertex normal
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_b_unnorm_glm);
dnormal_dunnormnormal_sum += barycentrics_sampled.y * dnormal_dunnormnormal;
// For 3rd vertex normal
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_c_unnorm_glm);
dnormal_dunnormnormal_sum += barycentrics_sampled.z * dnormal_dunnormnormal;
Eigen::Matrix<float, 3, 3> jacobian_rotation;
auto dx = drx * normals[vertex_ids_sampled.x];
auto dy = dry * normals[vertex_ids_sampled.y];
auto dz = drz * normals[vertex_ids_sampled.z];
jacobian_rotation <<
dx[0], dy[0], dz[0],
dx[1], dy[1], dz[1],
dx[2], dy[2], dz[2];
jacobian.block<3, 3>(offset_rows + current_index * 3, 1) = unnormnormal_jacobian * dnormal_dunnormnormal_sum * jacobian_rotation * wDense;
/*
* Energy derivation
* -dE/dC_I
* The derivative with respect to source image (frame_rgb)
* Full perspective derivation
* @ref http://www.songho.ca/opengl/gl_transform.html
*/
// Take into account barycentric interpolation in the fragment shader for vertices and their attributes
auto local_coord =
barycentrics_sampled.x * current_face[vertex_ids_sampled.x] +
barycentrics_sampled.y * current_face[vertex_ids_sampled.y] +
barycentrics_sampled.z * current_face[vertex_ids_sampled.z];
auto world_coord = face_pose * glm::vec4(local_coord, 1.0f);
auto proj_coord = projection * world_coord;
// Derivative of source image (screen coordinate system) with respect to (u,v)
// TODO: Check for boundary for xp and yp
Eigen::Matrix<float, 3, 2> jacobian_uv;
int background_index_left = 3 * (xp - 1 + yp * imageWidth);
int background_index_right = 3 * (xp + 1 + yp * imageWidth);
int background_index_up = 3 * (xp + (yp - 1) * imageWidth);
int background_index_down = 3 * (xp + (yp + 1) * imageWidth);
/*
* Central difference derivation for color ([c(i+1)-c(i-1)]/2p) with viewport transformation derivation
* Color => Screen => NDC
*/
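// The 0.25f * imageWidth / imageHeight factors combine the central-difference step 1/(2 px)
// with the NDC-to-pixel viewport scale (width/2 resp. height/2); the signs account for the
// residual being C_S - C_I (hence -dC_I/d(u,v)) and for the image rows running top-down
// while GL's v axis runs bottom-up.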
// dColor/du
jacobian_uv(0, 0) = -(image[background_index_right] / 255.0f - image[background_index_left] / 255.0f) * 0.25f * imageWidth;
jacobian_uv(1, 0) = -(image[background_index_right + 1] / 255.0f - image[background_index_left + 1] / 255.0f) * 0.25f * imageWidth;
jacobian_uv(2, 0) = -(image[background_index_right + 2] / 255.0f - image[background_index_left + 2] / 255.0f) * 0.25f * imageWidth;
// dColor/dv
jacobian_uv(0, 1) = (image[background_index_down] / 255.0f - image[background_index_up] / 255.0f) * 0.25f * imageHeight;
jacobian_uv(1, 1) = (image[background_index_down + 1] / 255.0f - image[background_index_up + 1] / 255.0f) * 0.25f * imageHeight;
jacobian_uv(2, 1) = (image[background_index_down + 2] / 255.0f - image[background_index_up + 2] / 255.0f) * 0.25f * imageHeight;
// Jacobian for homogenization (AKA division by w)
// NDC => Clip coordinates
Eigen::Matrix<float, 2, 3> jacobian_proj;
auto one_over_wp = 1.0f / proj_coord.w;
jacobian_proj(0, 0) = one_over_wp;
jacobian_proj(0, 1) = 0.0f;
jacobian_proj(0, 2) = -proj_coord.x * one_over_wp * one_over_wp;
jacobian_proj(1, 0) = 0.0f;
jacobian_proj(1, 1) = one_over_wp;
jacobian_proj(1, 2) = -proj_coord.y * one_over_wp * one_over_wp;
/*
* Jacobian for projection
* Clip coordinates => Eye coordinates (which is identity matrix I, since the camera is assumed to be at the origin)
* dProjection/dX_world
* dProjection/dY_world
* dProjection/dW_world
*/
Eigen::Matrix<float, 3, 3> jacobian_world = Eigen::MatrixXf::Zero(3, 3);
jacobian_world(0, 0) = projection[0][0];
jacobian_world(1, 1) = projection[1][1];
jacobian_world(2, 2) = -1.0f;
/*
* Jacobian for intrinsics (change of fov in our virtual camera)
* dPerspectiveRH_NO/dFov
* @ref glm::perspectiveRH_NO()
*/
Eigen::Matrix<float, 3, 1> jacobian_intrinsics = Eigen::MatrixXf::Zero(3, 1);
jacobian_intrinsics(0, 0) = world_coord.x;
jacobian.block<3, 1>(offset_rows + current_index * 3, 0) = jacobian_uv * jacobian_proj * jacobian_intrinsics * wDense;
/*
* Derivative of world coordinates with respect to rotation coefficients
* Since this node (world space) in our computation graph is common for [R, T] as well as expression and shape
* we can branch the calculations out and derive jacobian_pose first.
* World coordinates => Local coordinates
* X_world = R * X_local + T
* dX_world/dR and dX_world/dT
*/
dx = drx * local_coord;
dy = dry * local_coord;
dz = drz * local_coord;
Eigen::Matrix<float, 3, 6> jacobian_pose = Eigen::MatrixXf::Zero(3, 6);
jacobian_pose(0, 3) = 1.0f;
jacobian_pose(1, 4) = 1.0f;
jacobian_pose(2, 5) = 1.0f;
jacobian_pose(0, 0) = dx[0];
jacobian_pose(1, 0) = dx[1];
jacobian_pose(2, 0) = dx[2];
jacobian_pose(0, 1) = dy[0];
jacobian_pose(1, 1) = dy[1];
jacobian_pose(2, 1) = dy[2];
jacobian_pose(0, 2) = dz[0];
jacobian_pose(1, 2) = dz[1];
jacobian_pose(2, 2) = dz[2];
auto jacobian_proj_world = jacobian_uv * jacobian_proj * jacobian_world;
jacobian.block<3, 6>(offset_rows + current_index * 3, 1) += jacobian_proj_world * jacobian_pose * wDense;
/*
* Derivative of world coordinates with respect to local coordinates.
* This is the rotation matrix.
* X_world = R * X_local + T
* dX_world/dX_local = R
*/
auto jacobian_proj_world_local = jacobian_proj_world * jacobian_local * wDense;
// Derivative of local coordinates with respect to shape and expression parameters
jacobian.block(offset_rows + current_index * 3, 7, 3, nShapeCoeffs) +=
(jacobian_proj_world_local * barycentrics_sampled.x) * shape_basis.block(3 * vertex_ids_sampled.x, 0, 3, nShapeCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.y) * shape_basis.block(3 * vertex_ids_sampled.y, 0, 3, nShapeCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.z) * shape_basis.block(3 * vertex_ids_sampled.z, 0, 3, nShapeCoeffs);
jacobian.block(offset_rows + current_index * 3, 7 + nShapeCoeffs, 3, nExpressionCoeffs) +=
(jacobian_proj_world_local * barycentrics_sampled.x) * expression_basis.block(3 * vertex_ids_sampled.x, 0, 3, nExpressionCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.y) * expression_basis.block(3 * vertex_ids_sampled.y, 0, 3, nExpressionCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.z) * expression_basis.block(3 * vertex_ids_sampled.z, 0, 3, nExpressionCoeffs);
return;
}
/*
* Sparse terms for Feature Alignment
* Feature similarity between a set of detected salient facial feature point pairs
*
* E = sum(l2_norm(f - Π(Φ(local_coord)))^2)
* where Π(Φ(·)) is the full perspective projection
*/
auto vertex_id = prior_local_ids[i];
auto local_coord = current_face[vertex_id];
auto world_coord = face_pose * glm::vec4(local_coord, 1.0f);
auto proj_coord = projection * world_coord;
auto uv = glm::vec2(proj_coord.x, proj_coord.y) / proj_coord.w;
// Residual
auto residual = uv - sparse_features[i];
residuals(i * 2) = residual.x * wSparse;
residuals(i * 2 + 1) = residual.y * wSparse;
// Jacobians follow the same description like in the case of dense features
// Jacobian for homogenization (AKA division by w)
Eigen::Matrix<float, 2, 3> jacobian_proj;
auto one_over_wp = 1.0f / proj_coord.w;
jacobian_proj(0, 0) = one_over_wp;
jacobian_proj(0, 1) = 0.0f;
jacobian_proj(0, 2) = -proj_coord.x * one_over_wp * one_over_wp;
jacobian_proj(1, 0) = 0.0f;
jacobian_proj(1, 1) = one_over_wp;
jacobian_proj(1, 2) = -proj_coord.y * one_over_wp * one_over_wp;
// Jacobian for projection
Eigen::Matrix<float, 3, 3> jacobian_world = Eigen::MatrixXf::Zero(3, 3);
jacobian_world(0, 0) = projection[0][0];
jacobian_world(1, 1) = projection[1][1];
jacobian_world(2, 2) = -1.0f;
// Jacobian for intrinsics
Eigen::Matrix<float, 3, 1> jacobian_intrinsics = Eigen::MatrixXf::Zero(3, 1);
jacobian_intrinsics(0, 0) = world_coord.x;
jacobian.block<2, 1>(i * 2, 0) = jacobian_proj * jacobian_intrinsics * wSparse;
// Derivative of world coordinates with respect to rotation coefficients
auto dx = drx * local_coord;
auto dy = dry * local_coord;
auto dz = drz * local_coord;
Eigen::Matrix<float, 3, 6> jacobian_pose = Eigen::MatrixXf::Zero(3, 6);
jacobian_pose(0, 3) = 1.0f;
jacobian_pose(1, 4) = 1.0f;
jacobian_pose(2, 5) = 1.0f;
jacobian_pose(0, 0) = dx[0];
jacobian_pose(1, 0) = dx[1];
jacobian_pose(2, 0) = dx[2];
jacobian_pose(0, 1) = dy[0];
jacobian_pose(1, 1) = dy[1];
jacobian_pose(2, 1) = dy[2];
jacobian_pose(0, 2) = dz[0];
jacobian_pose(1, 2) = dz[1];
jacobian_pose(2, 2) = dz[2];
auto jacobian_proj_world = jacobian_proj * jacobian_world * wSparse;
jacobian.block<2, 6>(i * 2, 1) = jacobian_proj_world * jacobian_pose;
// Derivative of world coordinates with respect to local coordinates.
// This is basically the rotation matrix.
auto jacobian_proj_world_local = jacobian_proj_world * jacobian_local;
// Derivative of local coordinates with respect to shape and expression parameters
// This is basically the corresponding (to unique vertices we have chosen) rows of basis matrices.
auto jacobian_shape = jacobian_proj_world_local * shape_basis.block(3 * vertex_id, 0, 3, nShapeCoeffs);
jacobian.block(i * 2, 7, 2, nShapeCoeffs) = jacobian_shape;
auto jacobian_expression = jacobian_proj_world_local * expression_basis.block(3 * vertex_id, 0, 3, nExpressionCoeffs);
jacobian.block(i * 2, 7 + nShapeCoeffs, 2, nExpressionCoeffs) = jacobian_expression;
}
__global__ void cuComputeVisiblePixelsAndBB(hipTextureObject_t texture, FaceBoundingBox* face_bb, int width, int height)
{
auto index = util::getThreadIndex2D();
if (index.x >= width || index.y >= height)
{
return;
}
int y = height - 1 - index.y; // "height - 1 - index.y" is used since OpenGL uses left-bottom corner as texture origin.
float4 color = tex2D<float4>(texture, index.x, y);
if (color.w > 0.0f)
{
atomicInc(&face_bb->num_visible_pixels, UINT32_MAX);
atomicMin(&face_bb->x_min, index.x);
atomicMin(&face_bb->y_min, index.y);
atomicMax(&face_bb->x_max, index.x);
atomicMax(&face_bb->y_max, index.y);
}
}
FaceBoundingBox GaussNewtonSolver::computeFaceBoundingBox(const int imageWidth, const int imageHeight)
{
FaceBoundingBox bb;
util::copy(m_face_bb, &bb, 1);
//TODO: Arrange this (16,16) according to TitanX when we use it.
dim3 threads_meta(16, 16);
dim3 blocks_meta(imageWidth / threads_meta.x + 1, imageHeight / threads_meta.y + 1);
cuComputeVisiblePixelsAndBB << <blocks_meta, threads_meta >> > (m_texture_rgb, m_face_bb.getPtr(), imageWidth, imageHeight);
util::copy(&bb, m_face_bb, 1);
//std::cout << bb.num_visible_pixels << " " << bb.x_min << " " << bb.y_min << " " << bb.x_max << " " << bb.y_max << std::endl;
if (bb.num_visible_pixels <= 0 || bb.x_min >= bb.x_max || bb.y_min >= bb.y_max)
{
std::cout << "Warning: invalid face bounding box!" << std::endl;
}
bb.width = bb.x_max - bb.x_min;
bb.height = bb.y_max - bb.y_min;
return bb;
}
void GaussNewtonSolver::computeJacobian(
//shared memory
const FaceBoundingBox face_bb,
const int nFeatures, const int imageWidth, const int imageHeight,
const int nShapeCoeffs, const int nExpressionCoeffs, const int nAlbedoCoeffs,
const int nUnknowns, const int nResiduals,
const int nVerticesTimes3, const int nShapeCoeffsTotal, const int nExpressionCoeffsTotal, const int nAlbedoCoeffsTotal, const int nShcoeffsTotal,
float sparseWeight, float denseWeight, float regularizationWeight,
uchar* image,
const glm::mat4& face_pose, const glm::mat3& drx, const glm::mat3& dry, const glm::mat3& drz, const glm::mat4& projection, const Eigen::Matrix3f& jacobian_local,
//device memory input
int* prior_local_ids, glm::vec3* current_face, glm::vec2* sparse_features,
float* p_shape_basis,
float* p_expression_basis,
float* p_albedo_basis,
float* p_coefficients_shape,
float* p_coefficients_expression,
float* p_coefficients_albedo,
float* p_coefficients_sh,
//device memory output
float* p_jacobian, float* p_residuals
) const
{
const int nPixels = face_bb.width * face_bb.height;
const int nFaceCoeffs = nShapeCoeffs + nExpressionCoeffs + nAlbedoCoeffs;
const int n = nFeatures + nPixels + nFaceCoeffs;
//TODO: Fine tune these configs according to TitanX in the end.
const int threads = 128;
const int block = (n + threads - 1) / threads;
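// One thread per residual source: sparse feature, covered pixel inside the face bounding
// box, or regularized face coefficient (hence n = nFeatures + nPixels + nFaceCoeffs).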
auto time = util::runKernelGetExecutionTime([&]() {cuComputeJacobianSparseDense << <block, threads >> > (
//shared memory
face_bb,
nFeatures, imageWidth, imageHeight,
nFaceCoeffs, nPixels, n,
nShapeCoeffs, nExpressionCoeffs, nAlbedoCoeffs,
nUnknowns, nResiduals,
nVerticesTimes3, nShapeCoeffsTotal, nExpressionCoeffsTotal, nAlbedoCoeffsTotal,
glm::sqrt(sparseWeight / nFeatures), glm::sqrt(denseWeight / face_bb.num_visible_pixels), glm::sqrt(regularizationWeight),
image,
face_pose, drx, dry, drz, projection, jacobian_local,
//device memory input
prior_local_ids, current_face, sparse_features,
p_shape_basis,
p_expression_basis,
p_albedo_basis,
p_coefficients_shape,
p_coefficients_expression,
p_coefficients_albedo,
p_coefficients_sh,
m_texture_rgb,
m_texture_barycentrics,
m_texture_vertex_ids,
//device memory output
p_jacobian, p_residuals
);
});
std::cout << "Jacobian kernel time: " << time << std::endl;
hipDeviceSynchronize();
}
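// Each block owns one column of J: its threads stride over the active residual rows,
// accumulate the squared entries and atomically add the partial sums, yielding diag(J^T J);
// cuOneOverElement then inverts these (with a 1e-4 floor) to form the Jacobi preconditioner.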
__global__ void cuComputeJTJDiagonals(const int nUnknowns, const int nCurrentResiduals, const int nResiduals, float* jacobian, float* preconditioner)
{
int tid = threadIdx.x;
int col = blockIdx.x;
float sum = 0.0f;
for (int row = tid; row < nCurrentResiduals; row += blockDim.x)
{
auto v = jacobian[col * nResiduals + row];
sum += v * v;
}
atomicAdd(&preconditioner[col], sum);
}
__global__ void cuElementwiseMultiplication(float* v1, float* v2, float* out)
{
int i = util::getThreadIndex1D();
out[i] = v1[i] * v2[i];
}
__global__ void cuOneOverElement(float* preconditioner)
{
int i = util::getThreadIndex1D();
preconditioner[i] = 1.0f / glm::max(preconditioner[i], 1.0e-4f);
}
void GaussNewtonSolver::computeJacobiPreconditioner(const int nUnknowns, const int nCurrentResiduals, const int nResiduals, float* jacobian, float* preconditioner)
{
cuComputeJTJDiagonals << <nUnknowns, 128 >> > (nUnknowns, nCurrentResiduals, nResiduals, jacobian, preconditioner);
hipDeviceSynchronize();
cuOneOverElement << <1, nUnknowns >> > (preconditioner);
hipDeviceSynchronize();
}
void GaussNewtonSolver::elementwiseMultiplication(const int nElements, float* v1, float* v2, float* out)
{
cuElementwiseMultiplication << <1, nElements >> > (v1, v2, out);
hipDeviceSynchronize();
} | 40e33508eb5f4b9e4698ea049714ff26656c3b3e.cu | #pragma once
#include "gauss_newton_solver.h"
#include "util.h"
#include "jacobian_util.h"
#include "device_util.h"
#include "device_array.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
/**
* Compute Jacobian matrix for parametric model w.r.t fov of virtual camera, Rotation, Translation, α, β, δ, γ
* (α, β, δ) are parametric face model Eigen basis scaling factors
* γ is illumination model (Spherical harmonics)
*
* Optimization energy
* P = (intrinsics, pose, α, β, δ, γ)
* E(P) = w_col * E_col(P) + w_lan * E_lan(P) + w_reg * E_reg(P)
*
*/
__global__ void cuComputeJacobianSparseDense(
// Shared memory
FaceBoundingBox face_bb,
const int nFeatures, const int imageWidth, const int imageHeight,
const int nFaceCoeffs, const int nPixels, const int n,
const int nShapeCoeffs, const int nExpressionCoeffs, const int nAlbedoCoeffs,
const int nUnknowns, const int nResiduals,
const int nVerticesTimes3, const int nShapeCoeffsTotal, const int nExpressionCoeffsTotal, const int nAlbedoCoeffsTotal,
const float wSparse, float wDense, const float wReg,
uchar* image,
glm::mat4 face_pose, glm::mat3 drx, glm::mat3 dry, glm::mat3 drz, glm::mat4 projection, Eigen::Matrix3f jacobian_local,
// Device memory input
int* prior_local_ids, glm::vec3* current_face, glm::vec2* sparse_features,
float* p_shape_basis,
float* p_expression_basis,
float* p_albedo_basis,
float* p_coefficients_shape,
float* p_coefficients_expression,
float* p_coefficients_albedo,
float* p_coefficients_sh,
cudaTextureObject_t rgb,
cudaTextureObject_t barycentrics,
cudaTextureObject_t vertex_ids,
// Device memory output
float* p_jacobian, float* p_residuals)
{
int i = util::getThreadIndex1D();
if (i >= n)
{
return;
}
Eigen::Map<Eigen::MatrixXf> jacobian(p_jacobian, nResiduals, nUnknowns);
Eigen::Map<Eigen::VectorXf> residuals(p_residuals, nResiduals);
Eigen::Map<Eigen::MatrixXf> shape_basis(p_shape_basis, nVerticesTimes3, nShapeCoeffsTotal);
Eigen::Map<Eigen::MatrixXf> expression_basis(p_expression_basis, nVerticesTimes3, nExpressionCoeffsTotal);
Eigen::Map<Eigen::MatrixXf> albedo_basis(p_albedo_basis, nVerticesTimes3, nAlbedoCoeffsTotal);
// Regularization terms based on the assumption of a normally distributed population
if (i >= nFeatures + nPixels)
{
int offset_rows = nFeatures * 2 + nPixels * 3;
int offset_cols = 7;
const int current_index = i - nFeatures - nPixels;
const int expression_shift = nShapeCoeffs;
const int albedo_shift = nShapeCoeffs + nExpressionCoeffs;
float coefficient = 0.0f;
int relative_index = current_index;
// Shape
if (current_index < expression_shift)
{
coefficient = p_coefficients_shape[relative_index];
}
// Expression
else if (current_index < albedo_shift)
{
offset_rows += expression_shift;
offset_cols += expression_shift;
relative_index -= expression_shift;
coefficient = p_coefficients_expression[relative_index];
}
// Albedo
else
{
offset_rows += albedo_shift;
offset_cols += albedo_shift;
relative_index -= albedo_shift;
coefficient = p_coefficients_albedo[relative_index];
}
jacobian(offset_rows + relative_index, offset_cols + relative_index) = wReg;
residuals(offset_rows + relative_index) = coefficient * wReg;
return;
}
/*
* Photo-Consistency dense energy term
* E = sum(norm_l2(C_S - C_I))
* where C_S is synthesized image
* C_I is input RGB image
*
*/
if (i >= nFeatures)
{
int offset_rows = nFeatures * 2;
const int current_index = i - nFeatures;
unsigned int xp = current_index % face_bb.width + face_bb.x_min;
unsigned int yp = current_index / face_bb.width + face_bb.y_min;
int background_index = 3 * (xp + yp * imageWidth);
int ygl = imageHeight - 1 - yp; // "height - 1 - index.y" OpenGL uses left-bottom corner as texture origin.
float4 rgb_sampled = tex2D<float4>(rgb, xp, ygl);
if (rgb_sampled.w < 1.0f) // pixel is not covered by face
{
return;
}
float4 barycentrics_sampled = tex2D<float4>(barycentrics, xp, ygl);
int4 vertex_ids_sampled = tex2D<int4>(vertex_ids, xp, ygl);
Eigen::Map<Eigen::Vector3f> face_rgb(reinterpret_cast<float*>(&rgb_sampled));
Eigen::Vector3f frame_rgb;
frame_rgb.x() = image[background_index] / 255.0f;
frame_rgb.y() = image[background_index + 1] / 255.0f;
frame_rgb.z() = image[background_index + 2] / 255.0f;
Eigen::Vector3f residual = face_rgb - frame_rgb;
// IRLS with L1 norm.
wDense /= glm::sqrt(glm::max(residual.norm(), 1.0e-8f));
residuals.block(offset_rows + current_index * 3, 0, 3, 1) = residual * wDense;
/*
* Energy derivation
* dE/dC_S
* The derivative with respect to synthesized image
* Fragment shader derivation
*
* Albedo derivation
* Color = Light * Albedo
* Albedo = A(E_alb_A * β) + B(E_alb_B * β) + C(E_alb_C * β) => barycentric coordinates
* dColor/dAlbedo
*/
jacobian.block(offset_rows + current_index * 3, 7 + nShapeCoeffs + nExpressionCoeffs, 3, nAlbedoCoeffs) =
(barycentrics_sampled.w * wDense * barycentrics_sampled.x) * albedo_basis.block(3 * vertex_ids_sampled.x, 0, 3, nAlbedoCoeffs) +
(barycentrics_sampled.w * wDense * barycentrics_sampled.y) * albedo_basis.block(3 * vertex_ids_sampled.y, 0, 3, nAlbedoCoeffs) +
(barycentrics_sampled.w * wDense * barycentrics_sampled.z) * albedo_basis.block(3 * vertex_ids_sampled.z, 0, 3, nAlbedoCoeffs);
/*
* Spherical harmonics derivation
* Color = SH(normal) * Albedo
* dSH/d{9 coefficients}
*
* see file /shaders/face.frag computeSH(normal)
*/
auto number_of_vertices = nVerticesTimes3 / 3;
auto albedos = current_face + number_of_vertices;
auto normals = current_face + 2 * number_of_vertices;
auto normal_a_unnorm_glm = glm::mat3(face_pose) * normals[vertex_ids_sampled.x];
auto normal_b_unnorm_glm = glm::mat3(face_pose) * normals[vertex_ids_sampled.y];
auto normal_c_unnorm_glm = glm::mat3(face_pose) * normals[vertex_ids_sampled.z];
auto normal_a_glm = glm::normalize(normal_a_unnorm_glm);
auto normal_b_glm = glm::normalize(normal_b_unnorm_glm);
auto normal_c_glm = glm::normalize(normal_c_unnorm_glm);
auto albedo_glm = barycentrics_sampled.x * albedos[vertex_ids_sampled.x] + barycentrics_sampled.y * albedos[vertex_ids_sampled.y] + barycentrics_sampled.z * albedos[vertex_ids_sampled.z];
auto normal_unnorm_glm = barycentrics_sampled.x * normal_a_glm + barycentrics_sampled.y * normal_b_glm + barycentrics_sampled.z * normal_c_glm;
auto normal_glm = glm::normalize(normal_unnorm_glm);
Eigen::Vector3f albedo;
albedo << albedo_glm.x, albedo_glm.y, albedo_glm.z;
// dSH/d{9 coefficients}
Eigen::Matrix<float, 1, 9> bands(9);
bands(0, 0) = 1.0f;
bands(0, 1) = normal_glm.y;
bands(0, 2) = normal_glm.z;
bands(0, 3) = normal_glm.x;
bands(0, 4) = normal_glm.x * normal_glm.y;
bands(0, 5) = normal_glm.y * normal_glm.z;
bands(0, 6) = 3.0f * normal_glm.z * normal_glm.z - 1.0f;
bands(0, 7) = normal_glm.x * normal_glm.z;
bands(0, 8) = normal_glm.x * normal_glm.x - normal_glm.y * normal_glm.y;
jacobian.block<3, 9>(offset_rows + current_index * 3, 7 + nShapeCoeffs + nExpressionCoeffs + nAlbedoCoeffs) = wDense * albedo * bands;
/*
* Expression and shape derivations
* Triangle = (A, B, C)
* Normal{X, Y, Z} = normalize(cross(B - A, C - A))
* Color = SH(Normal) * Albedo
* @ref https://www.lighthouse3d.com/tutorials/glsl-12-tutorial/normalization-issues/
*
* dColor/dα and dColor/dδ
*
* Example of chain rule:
* dColor/dα = dColor/dSH * dSH/dNormal * dNormal/dNormalize() * dNormalize/dCross() * dCross()/d{(B - A), (C - A), A}
*/
Eigen::Matrix<float, 1, 3> dlight_dnormal;
jacobian_util::computeDLightDNormal(dlight_dnormal, normal_glm, p_coefficients_sh);
Eigen::Matrix<float, 3, 3> dnormal_dunnormnormal;
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_unnorm_glm);
Eigen::Matrix<float, 3, 3> unnormnormal_jacobian = albedo * dlight_dnormal * dnormal_dunnormnormal;
Eigen::Matrix<float, 3, 3> v0_jacobian;
Eigen::Matrix<float, 3, 3> v1_jacobian;
Eigen::Matrix<float, 3, 3> v2_jacobian;
jacobian_util::computeNormalJacobian(v0_jacobian, v1_jacobian, v2_jacobian,
current_face[vertex_ids_sampled.x], current_face[vertex_ids_sampled.y], current_face[vertex_ids_sampled.z]);
unnormnormal_jacobian = wDense * unnormnormal_jacobian * jacobian_local;
v0_jacobian = unnormnormal_jacobian * v0_jacobian;
v1_jacobian = unnormnormal_jacobian * v1_jacobian;
v2_jacobian = unnormnormal_jacobian * v2_jacobian;
// dColor/dα
jacobian.block(offset_rows + current_index * 3, 7, 3, nShapeCoeffs) =
v0_jacobian * shape_basis.block(3 * vertex_ids_sampled.x, 0, 3, nShapeCoeffs) +
v1_jacobian * shape_basis.block(3 * vertex_ids_sampled.y, 0, 3, nShapeCoeffs) +
v2_jacobian * shape_basis.block(3 * vertex_ids_sampled.z, 0, 3, nShapeCoeffs);
// dColor/dδ
jacobian.block(offset_rows + current_index * 3, 7 + nShapeCoeffs, 3, nExpressionCoeffs) =
v0_jacobian * expression_basis.block(3 * vertex_ids_sampled.x, 0, 3, nExpressionCoeffs) +
v1_jacobian * expression_basis.block(3 * vertex_ids_sampled.y, 0, 3, nExpressionCoeffs) +
v2_jacobian * expression_basis.block(3 * vertex_ids_sampled.z, 0, 3, nExpressionCoeffs);
Eigen::Matrix<float, 3, 3> dnormal_dunnormnormal_sum = Eigen::MatrixXf::Zero(3, 3);
// For 1st vertex normal
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_a_unnorm_glm);
dnormal_dunnormnormal_sum += barycentrics_sampled.x * dnormal_dunnormnormal;
// For 2nd vertex normal
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_b_unnorm_glm);
dnormal_dunnormnormal_sum += barycentrics_sampled.y * dnormal_dunnormnormal;
// For 3rd vertex normal
jacobian_util::computeNormalizationJacobian(dnormal_dunnormnormal, normal_c_unnorm_glm);
dnormal_dunnormnormal_sum += barycentrics_sampled.z * dnormal_dunnormnormal;
Eigen::Matrix<float, 3, 3> jacobian_rotation;
auto dx = drx * normals[vertex_ids_sampled.x];
auto dy = dry * normals[vertex_ids_sampled.y];
auto dz = drz * normals[vertex_ids_sampled.z];
jacobian_rotation <<
dx[0], dy[0], dz[0],
dx[1], dy[1], dz[1],
dx[2], dy[2], dz[2];
jacobian.block<3, 3>(offset_rows + current_index * 3, 1) = unnormnormal_jacobian * dnormal_dunnormnormal_sum * jacobian_rotation * wDense;
/*
* Energy derivation
* -dE/dC_I
* The derivative with respect to source image (frame_rgb)
* Full perspective derivation
* @ref http://www.songho.ca/opengl/gl_transform.html
*/
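/*
* The transform chain used below is: local (model) coordinates -> world via face_pose -> clip via projection
* -> NDC via the division by w -> screen via the viewport transform -> sampled image color.
* The Jacobian is assembled in reverse order as jacobian_uv * jacobian_proj * jacobian_world * (pose / shape / expression terms).
*/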
// Take into account barycentric interpolation in the fragment shader for vertices and their attributes
auto local_coord =
barycentrics_sampled.x * current_face[vertex_ids_sampled.x] +
barycentrics_sampled.y * current_face[vertex_ids_sampled.y] +
barycentrics_sampled.z * current_face[vertex_ids_sampled.z];
auto world_coord = face_pose * glm::vec4(local_coord, 1.0f);
auto proj_coord = projection * world_coord;
// Derivative of source image (screen coordinate system) with respect to (u,v)
// TODO: Check for boundary for xp and yp
Eigen::Matrix<float, 3, 2> jacobian_uv;
int background_index_left = 3 * (xp - 1 + yp * imageWidth);
int background_index_right = 3 * (xp + 1 + yp * imageWidth);
int background_index_up = 3 * (xp + (yp - 1) * imageWidth);
int background_index_down = 3 * (xp + (yp + 1) * imageWidth);
/*
* Central-difference image gradient for color ( [c(x+1) - c(x-1)] / 2 per pixel ), chained with the viewport (screen <=> NDC) transformation
* Color => Screen => NDC
*/
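/*
* Worked factor (for reference): with the viewport transform x_screen = (u_ndc + 1) * imageWidth / 2,
* dColor/du_ndc = dColor/dx_screen * dx_screen/du_ndc
* ~= [c(x+1) - c(x-1)] / 2 * imageWidth / 2
* = 0.25 * imageWidth * [c(x+1) - c(x-1)]
* which is the 0.25f * imageWidth (resp. 0.25f * imageHeight) factor used below; the signs follow the
* image-row vs. NDC-axis orientation conventions of this renderer.
*/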
// dColor/du
jacobian_uv(0, 0) = -(image[background_index_right] / 255.0f - image[background_index_left] / 255.0f) * 0.25f * imageWidth;
jacobian_uv(1, 0) = -(image[background_index_right + 1] / 255.0f - image[background_index_left + 1] / 255.0f) * 0.25f * imageWidth;
jacobian_uv(2, 0) = -(image[background_index_right + 2] / 255.0f - image[background_index_left + 2] / 255.0f) * 0.25f * imageWidth;
// dColor/dv
jacobian_uv(0, 1) = (image[background_index_down] / 255.0f - image[background_index_up] / 255.0f) * 0.25f * imageHeight;
jacobian_uv(1, 1) = (image[background_index_down + 1] / 255.0f - image[background_index_up + 1] / 255.0f) * 0.25f * imageHeight;
jacobian_uv(2, 1) = (image[background_index_down + 2] / 255.0f - image[background_index_up + 2] / 255.0f) * 0.25f * imageHeight;
// Jacobian for homogenization (AKA division by w)
// NDC => Clip coordinates
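// With (u, v) = (x_clip / w_clip, y_clip / w_clip): du/dx = 1/w, du/dw = -x/w^2, dv/dy = 1/w, dv/dw = -y/w^2,
// which are exactly the entries filled in below (columns ordered x_clip, y_clip, w_clip).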
Eigen::Matrix<float, 2, 3> jacobian_proj;
auto one_over_wp = 1.0f / proj_coord.w;
jacobian_proj(0, 0) = one_over_wp;
jacobian_proj(0, 1) = 0.0f;
jacobian_proj(0, 2) = -proj_coord.x * one_over_wp * one_over_wp;
jacobian_proj(1, 0) = 0.0f;
jacobian_proj(1, 1) = one_over_wp;
jacobian_proj(1, 2) = -proj_coord.y * one_over_wp * one_over_wp;
/*
* Jacobian for projection
* Clip coordinates => Eye coordinates (the view transform is the identity, since the camera is assumed to sit at the origin, so eye == world)
* dProjection/dX_world
* dProjection/dY_world
* dProjection/dZ_world (the component that feeds W_clip)
*/
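/*
* For reference, with glm::perspectiveRH_NO (and eye == world):
* x_clip = P[0][0] * x_world, y_clip = P[1][1] * y_world, w_clip = -z_world
* so the non-zero entries are d(x_clip)/dx = P[0][0], d(y_clip)/dy = P[1][1], d(w_clip)/dz = -1,
* matching jacobian_world below. The z_clip row is not needed because the color residual only depends on
* the screen-space (u, v) position.
*/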
Eigen::Matrix<float, 3, 3> jacobian_world = Eigen::MatrixXf::Zero(3, 3);
jacobian_world(0, 0) = projection[0][0];
jacobian_world(1, 1) = projection[1][1];
jacobian_world(2, 2) = -1.0f;
/*
* Jacobian for intrinsics (change of fov in our virtual camera)
* dPerspectiveRH_NO/dFov
* @ref glm::perspectiveRH_NO()
*/
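// Column 0 of the unknowns is the camera intrinsic (fov-related) parameter. It appears to enter the projection
// through projection[0][0]; with eye == world, d(x_clip)/d(projection[0][0]) = x_world, which is the single
// non-zero entry filled in below.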
Eigen::Matrix<float, 3, 1> jacobian_intrinsics = Eigen::MatrixXf::Zero(3, 1);
jacobian_intrinsics(0, 0) = world_coord.x;
jacobian.block<3, 1>(offset_rows + current_index * 3, 0) = jacobian_uv * jacobian_proj * jacobian_intrinsics * wDense;
/*
* Derivative of world coordinates with respect to rotation coefficients
* Since this node (world space) in our computation graph is common for [R, T] as well as expression and shape
* we can branch the calculations out and derive jacobian_pose first.
* World coordinates => Local coordinates
* X_world = R * X_local + T
* dX_world/dR and dX_world/dT
*/
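/*
* With pose parameters (rx, ry, rz, tx, ty, tz):
* dX_world/dT = I3 (columns 3-5 of jacobian_pose)
* dX_world/dr_i = (dR/dr_i) * X_local, i.e. drx/dry/drz applied to the local coordinate (columns 0-2)
*/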
dx = drx * local_coord;
dy = dry * local_coord;
dz = drz * local_coord;
Eigen::Matrix<float, 3, 6> jacobian_pose = Eigen::MatrixXf::Zero(3, 6);
jacobian_pose(0, 3) = 1.0f;
jacobian_pose(1, 4) = 1.0f;
jacobian_pose(2, 5) = 1.0f;
jacobian_pose(0, 0) = dx[0];
jacobian_pose(1, 0) = dx[1];
jacobian_pose(2, 0) = dx[2];
jacobian_pose(0, 1) = dy[0];
jacobian_pose(1, 1) = dy[1];
jacobian_pose(2, 1) = dy[2];
jacobian_pose(0, 2) = dz[0];
jacobian_pose(1, 2) = dz[1];
jacobian_pose(2, 2) = dz[2];
auto jacobian_proj_world = jacobian_uv * jacobian_proj * jacobian_world;
jacobian.block<3, 6>(offset_rows + current_index * 3, 1) += jacobian_proj_world * jacobian_pose * wDense;
/*
* Derivative of world coordinates with respect to local coordinates.
* This is the rotation matrix.
* X_world = R * X_local + T
* dX_world/dX_local = R
*/
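// For the sampled surface point, dX_local/dα (resp. dδ) is the barycentric blend of the three vertices' basis rows,
// hence the barycentrics_sampled-weighted shape/expression blocks accumulated below (added to the shading terms filled in earlier).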
auto jacobian_proj_world_local = jacobian_proj_world * jacobian_local * wDense;
// Derivative of local coordinates with respect to shape and expression parameters
jacobian.block(offset_rows + current_index * 3, 7, 3, nShapeCoeffs) +=
(jacobian_proj_world_local * barycentrics_sampled.x) * shape_basis.block(3 * vertex_ids_sampled.x, 0, 3, nShapeCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.y) * shape_basis.block(3 * vertex_ids_sampled.y, 0, 3, nShapeCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.z) * shape_basis.block(3 * vertex_ids_sampled.z, 0, 3, nShapeCoeffs);
jacobian.block(offset_rows + current_index * 3, 7 + nShapeCoeffs, 3, nExpressionCoeffs) +=
(jacobian_proj_world_local * barycentrics_sampled.x) * expression_basis.block(3 * vertex_ids_sampled.x, 0, 3, nExpressionCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.y) * expression_basis.block(3 * vertex_ids_sampled.y, 0, 3, nExpressionCoeffs) +
(jacobian_proj_world_local * barycentrics_sampled.z) * expression_basis.block(3 * vertex_ids_sampled.z, 0, 3, nExpressionCoeffs);
return;
}
/*
* Sparse terms for Feature Alignment
* Feature similarity between a set of salient facial feature point pairs (detected 2D landmarks vs. projected model vertices)
*
* E = sum_i( || f_i - Π(Φ(v_i)) ||^2 )
* where Π(Φ(.)) is the full perspective projection of the model-space vertex v_i
*/
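// Note on weighting: the launcher below passes glm::sqrt(sparseWeight / nFeatures) as wSparse, so scaling both the
// residual and the Jacobian rows by wSparse makes each landmark contribute sparseWeight / nFeatures to J^T J and J^T r.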
auto vertex_id = prior_local_ids[i];
auto local_coord = current_face[vertex_id];
auto world_coord = face_pose * glm::vec4(local_coord, 1.0f);
auto proj_coord = projection * world_coord;
auto uv = glm::vec2(proj_coord.x, proj_coord.y) / proj_coord.w;
// Residual
auto residual = uv - sparse_features[i];
residuals(i * 2) = residual.x * wSparse;
residuals(i * 2 + 1) = residual.y * wSparse;
// Jacobians follow the same description like in the case of dense features
// Jacobian for homogenization (AKA division by w)
Eigen::Matrix<float, 2, 3> jacobian_proj;
auto one_over_wp = 1.0f / proj_coord.w;
jacobian_proj(0, 0) = one_over_wp;
jacobian_proj(0, 1) = 0.0f;
jacobian_proj(0, 2) = -proj_coord.x * one_over_wp * one_over_wp;
jacobian_proj(1, 0) = 0.0f;
jacobian_proj(1, 1) = one_over_wp;
jacobian_proj(1, 2) = -proj_coord.y * one_over_wp * one_over_wp;
// Jacobian for projection
Eigen::Matrix<float, 3, 3> jacobian_world = Eigen::MatrixXf::Zero(3, 3);
jacobian_world(0, 0) = projection[0][0];
jacobian_world(1, 1) = projection[1][1];
jacobian_world(2, 2) = -1.0f;
// Jacobian for intrinsics
Eigen::Matrix<float, 3, 1> jacobian_intrinsics = Eigen::MatrixXf::Zero(3, 1);
jacobian_intrinsics(0, 0) = world_coord.x;
jacobian.block<2, 1>(i * 2, 0) = jacobian_proj * jacobian_intrinsics * wSparse;
// Derivative of world coordinates with respect to rotation coefficients
auto dx = drx * local_coord;
auto dy = dry * local_coord;
auto dz = drz * local_coord;
Eigen::Matrix<float, 3, 6> jacobian_pose = Eigen::MatrixXf::Zero(3, 6);
jacobian_pose(0, 3) = 1.0f;
jacobian_pose(1, 4) = 1.0f;
jacobian_pose(2, 5) = 1.0f;
jacobian_pose(0, 0) = dx[0];
jacobian_pose(1, 0) = dx[1];
jacobian_pose(2, 0) = dx[2];
jacobian_pose(0, 1) = dy[0];
jacobian_pose(1, 1) = dy[1];
jacobian_pose(2, 1) = dy[2];
jacobian_pose(0, 2) = dz[0];
jacobian_pose(1, 2) = dz[1];
jacobian_pose(2, 2) = dz[2];
auto jacobian_proj_world = jacobian_proj * jacobian_world * wSparse;
jacobian.block<2, 6>(i * 2, 1) = jacobian_proj_world * jacobian_pose;
// Derivative of world coordinates with respect to local coordinates.
// This is basically the rotation matrix.
auto jacobian_proj_world_local = jacobian_proj_world * jacobian_local;
// Derivative of local coordinates with respect to shape and expression parameters
// This is basically the corresponding (to unique vertices we have chosen) rows of basis matrices.
auto jacobian_shape = jacobian_proj_world_local * shape_basis.block(3 * vertex_id, 0, 3, nShapeCoeffs);
jacobian.block(i * 2, 7, 2, nShapeCoeffs) = jacobian_shape;
auto jacobian_expression = jacobian_proj_world_local * expression_basis.block(3 * vertex_id, 0, 3, nExpressionCoeffs);
jacobian.block(i * 2, 7 + nShapeCoeffs, 2, nExpressionCoeffs) = jacobian_expression;
}
__global__ void cuComputeVisiblePixelsAndBB(cudaTextureObject_t texture, FaceBoundingBox* face_bb, int width, int height)
{
auto index = util::getThreadIndex2D();
if (index.x >= width || index.y >= height)
{
return;
}
int y = height - 1 - index.y; // "height - 1 - index.y" is used since OpenGL uses left-bottom corner as texture origin.
float4 color = tex2D<float4>(texture, index.x, y);
if (color.w > 0.0f)
{
atomicInc(&face_bb->num_visible_pixels, UINT32_MAX);
atomicMin(&face_bb->x_min, index.x);
atomicMin(&face_bb->y_min, index.y);
atomicMax(&face_bb->x_max, index.x);
atomicMax(&face_bb->y_max, index.y);
}
}
FaceBoundingBox GaussNewtonSolver::computeFaceBoundingBox(const int imageWidth, const int imageHeight)
{
FaceBoundingBox bb;
util::copy(m_face_bb, &bb, 1);
//TODO: Arrange this (16,16) according to TitanX when we use it.
dim3 threads_meta(16, 16);
dim3 blocks_meta(imageWidth / threads_meta.x + 1, imageHeight / threads_meta.y + 1);
cuComputeVisiblePixelsAndBB << <blocks_meta, threads_meta >> > (m_texture_rgb, m_face_bb.getPtr(), imageWidth, imageHeight);
util::copy(&bb, m_face_bb, 1);
//std::cout << bb.num_visible_pixels << " " << bb.x_min << " " << bb.y_min << " " << bb.x_max << " " << bb.y_max << std::endl;
if (bb.num_visible_pixels <= 0 || bb.x_min >= bb.x_max || bb.y_min >= bb.y_max)
{
std::cout << "Warning: invalid face bounding box!" << std::endl;
}
bb.width = bb.x_max - bb.x_min;
bb.height = bb.y_max - bb.y_min;
return bb;
}
void GaussNewtonSolver::computeJacobian(
//shared memory
const FaceBoundingBox face_bb,
const int nFeatures, const int imageWidth, const int imageHeight,
const int nShapeCoeffs, const int nExpressionCoeffs, const int nAlbedoCoeffs,
const int nUnknowns, const int nResiduals,
const int nVerticesTimes3, const int nShapeCoeffsTotal, const int nExpressionCoeffsTotal, const int nAlbedoCoeffsTotal, const int nShcoeffsTotal,
float sparseWeight, float denseWeight, float regularizationWeight,
uchar* image,
const glm::mat4& face_pose, const glm::mat3& drx, const glm::mat3& dry, const glm::mat3& drz, const glm::mat4& projection, const Eigen::Matrix3f& jacobian_local,
//device memory input
int* prior_local_ids, glm::vec3* current_face, glm::vec2* sparse_features,
float* p_shape_basis,
float* p_expression_basis,
float* p_albedo_basis,
float* p_coefficients_shape,
float* p_coefficients_expression,
float* p_coefficients_albedo,
float* p_coefficients_sh,
//device memory output
float* p_jacobian, float* p_residuals
) const
{
const int nPixels = face_bb.width * face_bb.height;
const int nFaceCoeffs = nShapeCoeffs + nExpressionCoeffs + nAlbedoCoeffs;
const int n = nFeatures + nPixels + nFaceCoeffs;
//TODO: Fine tune these configs according to TitanX in the end.
const int threads = 128;
const int block = (n + threads - 1) / threads;
auto time = util::runKernelGetExecutionTime([&]() {cuComputeJacobianSparseDense << <block, threads >> > (
//shared memory
face_bb,
nFeatures, imageWidth, imageHeight,
nFaceCoeffs, nPixels, n,
nShapeCoeffs, nExpressionCoeffs, nAlbedoCoeffs,
nUnknowns, nResiduals,
nVerticesTimes3, nShapeCoeffsTotal, nExpressionCoeffsTotal, nAlbedoCoeffsTotal,
glm::sqrt(sparseWeight / nFeatures), glm::sqrt(denseWeight / face_bb.num_visible_pixels), glm::sqrt(regularizationWeight),
image,
face_pose, drx, dry, drz, projection, jacobian_local,
//device memory input
prior_local_ids, current_face, sparse_features,
p_shape_basis,
p_expression_basis,
p_albedo_basis,
p_coefficients_shape,
p_coefficients_expression,
p_coefficients_albedo,
p_coefficients_sh,
m_texture_rgb,
m_texture_barycentrics,
m_texture_vertex_ids,
//device memory output
p_jacobian, p_residuals
);
});
std::cout << "Jacobian kernel time: " << time << std::endl;
cudaDeviceSynchronize();
}
__global__ void cuComputeJTJDiagonals(const int nUnknowns, const int nCurrentResiduals, const int nResiduals, float* jacobian, float* preconditioner)
{
int tid = threadIdx.x;
int col = blockIdx.x;
float sum = 0.0f;
for (int row = tid; row < nCurrentResiduals; row += blockDim.x)
{
auto v = jacobian[col * nResiduals + row];
sum += v * v;
}
atomicAdd(&preconditioner[col], sum);
}
__global__ void cuElementwiseMultiplication(float* v1, float* v2, float* out)
{
int i = util::getThreadIndex1D();
out[i] = v1[i] * v2[i];
}
__global__ void cuOneOverElement(float* preconditioner)
{
int i = util::getThreadIndex1D();
preconditioner[i] = 1.0f / glm::max(preconditioner[i], 1.0e-4f);
}
void GaussNewtonSolver::computeJacobiPreconditioner(const int nUnknowns, const int nCurrentResiduals, const int nResiduals, float* jacobian, float* preconditioner)
{
cuComputeJTJDiagonals << <nUnknowns, 128 >> > (nUnknowns, nCurrentResiduals, nResiduals, jacobian, preconditioner);
cudaDeviceSynchronize();
cuOneOverElement << <1, nUnknowns >> > (preconditioner);
cudaDeviceSynchronize();
}
void GaussNewtonSolver::elementwiseMultiplication(const int nElements, float* v1, float* v2, float* out)
{
cuElementwiseMultiplication << <1, nElements >> > (v1, v2, out);
cudaDeviceSynchronize();
} |
f35652bdc20682ef9532a8189b2478e8b68558a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h>
#include "voxel_query_gpu.h"
#include "cuda_utils.h"
__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the ball query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
new_xyz += pt_idx * 3;
new_coords += pt_idx * 4;
idx += pt_idx * nsample;
hiprandState_t state;
hiprand_init(pt_idx, 0, 0, &state);
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int batch_idx = new_coords[0];
int new_coords_z = new_coords[1];
int new_coords_y = new_coords[2];
int new_coords_x = new_coords[3];
int cnt = 0;
int cnt2 = 0;
// for (int dz = -1*z_range; dz <= z_range; ++dz) {
for (int dz = -1*z_range; dz <= z_range; ++dz) {
int z_coord = new_coords_z + dz;
if (z_coord < 0 || z_coord >= R1) continue;
for (int dy = -1*y_range; dy <= y_range; ++dy) {
int y_coord = new_coords_y + dy;
if (y_coord < 0 || y_coord >= R2) continue;
for (int dx = -1*x_range; dx <= x_range; ++dx) {
int x_coord = new_coords_x + dx;
if (x_coord < 0 || x_coord >= R3) continue;
int index = batch_idx * R1 * R2 * R3 + \
z_coord * R2 * R3 + \
y_coord * R3 + \
x_coord;
int neighbor_idx = point_indices[index];
if (neighbor_idx < 0) continue;
float x_per = xyz[neighbor_idx*3 + 0];
float y_per = xyz[neighbor_idx*3 + 1];
float z_per = xyz[neighbor_idx*3 + 2];
float dist2 = (x_per - new_x) * (x_per - new_x) + (y_per - new_y) * (y_per - new_y) + (z_per - new_z) * (z_per - new_z);
if (dist2 > radius2) continue;
++cnt2;
if (cnt < nsample) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[l] = neighbor_idx;
}
}
idx[cnt] = neighbor_idx;
++cnt;
}
// else {
// float rnd = hiprand_uniform(&state);
// if (rnd < (float(nsample) / cnt2)) {
// int insertidx = ceilf(hiprand_uniform(&state) * nsample) - 1;
// idx[insertidx] = neighbor_idx;
// }
// }
}
}
}
if (cnt == 0) idx[0] = -1;
}
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
hipError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( voxel_query_kernel_stack), dim3(blocks), dim3(threads), 0, 0, M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| f35652bdc20682ef9532a8189b2478e8b68558a0.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include "voxel_query_gpu.h"
#include "cuda_utils.h"
__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the ball query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= M) return;
new_xyz += pt_idx * 3;
new_coords += pt_idx * 4;
idx += pt_idx * nsample;
curandState state;
curand_init(pt_idx, 0, 0, &state);
float radius2 = radius * radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int batch_idx = new_coords[0];
int new_coords_z = new_coords[1];
int new_coords_y = new_coords[2];
int new_coords_x = new_coords[3];
int cnt = 0;
int cnt2 = 0;
// for (int dz = -1*z_range; dz <= z_range; ++dz) {
for (int dz = -1*z_range; dz <= z_range; ++dz) {
int z_coord = new_coords_z + dz;
if (z_coord < 0 || z_coord >= R1) continue;
for (int dy = -1*y_range; dy <= y_range; ++dy) {
int y_coord = new_coords_y + dy;
if (y_coord < 0 || y_coord >= R2) continue;
for (int dx = -1*x_range; dx <= x_range; ++dx) {
int x_coord = new_coords_x + dx;
if (x_coord < 0 || x_coord >= R3) continue;
int index = batch_idx * R1 * R2 * R3 + \
z_coord * R2 * R3 + \
y_coord * R3 + \
x_coord;
int neighbor_idx = point_indices[index];
if (neighbor_idx < 0) continue;
float x_per = xyz[neighbor_idx*3 + 0];
float y_per = xyz[neighbor_idx*3 + 1];
float z_per = xyz[neighbor_idx*3 + 2];
float dist2 = (x_per - new_x) * (x_per - new_x) + (y_per - new_y) * (y_per - new_y) + (z_per - new_z) * (z_per - new_z);
if (dist2 > radius2) continue;
++cnt2;
if (cnt < nsample) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[l] = neighbor_idx;
}
}
idx[cnt] = neighbor_idx;
++cnt;
}
// else {
// float rnd = curand_uniform(&state);
// if (rnd < (float(nsample) / cnt2)) {
// int insertidx = ceilf(curand_uniform(&state) * nsample) - 1;
// idx[insertidx] = neighbor_idx;
// }
// }
}
}
}
if (cnt == 0) idx[0] = -1;
}
void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
float radius, int z_range, int y_range, int x_range, const float *new_xyz,
const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
// :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
// :param point_indices: (B, Z, Y, X)
// output:
// idx: (M1 + M2, nsample)
cudaError_t err;
dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
voxel_query_kernel_stack<<<blocks, threads>>>(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
fa390e945b5e79d82b67a83b164a2d5c51533961.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<real, 3>
#define DeviceTensor1 THCDeviceTensor<real, 1>
template <int Dim>
static THCDeviceTensor<real, Dim> devicetensor(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<real, Dim>();
}
int inDim = THCTensor_(nDimension)(state, t);
if (inDim == Dim) {
return toDeviceTensor<real, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(THCTensor_(isContiguous)(state, t));
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = t->size[i];
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= t->size[i];
}
}
return THCDeviceTensor<real, Dim>(THCTensor_(data)(state, t), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
DeviceTensor3 input = devicetensor<3>(state, input_);
DeviceTensor3 output = devicetensor<3>(state, output_);
DeviceTensor1 weight = devicetensor<1>(state, weight_);
DeviceTensor1 bias = devicetensor<1>(state, bias_);
DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_);
DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_);
DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_);
DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(hipGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = devicetensor<3>(state, input_);
DeviceTensor3 gradOutput = devicetensor<3>(state, gradOutput_);
DeviceTensor3 gradInput = devicetensor<3>(state, gradInput_);
DeviceTensor1 gradWeight = devicetensor<1>(state, gradWeight_);
DeviceTensor1 gradBias = devicetensor<1>(state, gradBias_);
DeviceTensor1 weight = devicetensor<1>(state, weight_);
DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_);
DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_);
DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_);
DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(hipGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
| fa390e945b5e79d82b67a83b164a2d5c51533961.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<real, 3>
#define DeviceTensor1 THCDeviceTensor<real, 1>
template <int Dim>
static THCDeviceTensor<real, Dim> devicetensor(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<real, Dim>();
}
int inDim = THCTensor_(nDimension)(state, t);
if (inDim == Dim) {
return toDeviceTensor<real, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(THCTensor_(isContiguous)(state, t));
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = t->size[i];
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= t->size[i];
}
}
return THCDeviceTensor<real, Dim>(THCTensor_(data)(state, t), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
DeviceTensor3 input = devicetensor<3>(state, input_);
DeviceTensor3 output = devicetensor<3>(state, output_);
DeviceTensor1 weight = devicetensor<1>(state, weight_);
DeviceTensor1 bias = devicetensor<1>(state, bias_);
DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_);
DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_);
DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_);
DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(cudaGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = devicetensor<3>(state, input_);
DeviceTensor3 gradOutput = devicetensor<3>(state, gradOutput_);
DeviceTensor3 gradInput = devicetensor<3>(state, gradInput_);
DeviceTensor1 gradWeight = devicetensor<1>(state, gradWeight_);
DeviceTensor1 gradBias = devicetensor<1>(state, gradBias_);
DeviceTensor1 weight = devicetensor<1>(state, weight_);
DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_);
DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_);
DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_);
DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(cudaGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
|
b7ec7d46cea56ecf86cbafe77902e8c8d1d51db3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/nth_element.h>
#include <TAD.h>
#include <ShapeUtils.h>
#include <PointersManager.h>
#include <NativeOps.h>
#include <helpers/ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void fillUpElementKernel(void* outputBuffer, Nd4jLong* outputShapeInfo, void* inputBuffer, Nd4jLong* inputShapeInfo, Nd4jLong* pTadShape, Nd4jLong* pTadOffsets, Nd4jLong n) {
__shared__ T *z, *x;
__shared__ Nd4jLong bufferLength, arrLen;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuffer);
x = reinterpret_cast<T*>(inputBuffer);
arrLen = shape::length(pTadShape);
bufferLength = shape::length(outputShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index for the grid-stride loop
const auto step = gridDim.x * blockDim.x;
for (int t = tid; t < bufferLength; t += step) {
auto tX = x + pTadOffsets[t];
z[shape::getIndexOffset(t, outputShapeInfo, bufferLength)] = tX[shape::getIndexOffset(n, pTadShape, arrLen)]; //tX];
}
}
template <typename T>
void nthElementFunctor_(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
NDArray::prepareSpecialUse({output}, {input});
NDArray sortedVals(*input);
Nd4jPointer params[2];
params[0] = context;
params[1] = context->getCudaStream();
if (input->isVector()) {
NativeOps ops;
ops.sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse);
hipMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), hipMemcpyDeviceToDevice);
}
else { // rank greater than 1
std::vector<int> lastDims({input->rankOf() - 1});// = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(sortedVals.getShapeInfo(), lastDims);
auto pTadShape = packX.specialShapeInfo();
auto pTadShapeH = packX.primaryShapeInfo();
auto pTadOffsets = packX.specialOffsets();
// auto pLastDimData = (int*) manager.replicatePointer(lastDims.data(), lastDims.size() * sizeof(int));
NativeOps ops;
ops.sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse);
// manager.synchronize();
sortedVals.tickWriteDevice();
sortedVals.syncToHost();
sortedVals.printIndexedBuffer("Hello");
sortedVals.printBuffer("Hello line");
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( fillUpElementKernel<T>), dim3(32), dim3(64), 1024, *stream, output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n);
}
NDArray::registerSpecialUse({output}, {input});
}
void nthElementFunctor(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void nthElementFunctor_, (nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse), LIBND4J_TYPES);
}
}
}
| b7ec7d46cea56ecf86cbafe77902e8c8d1d51db3.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/nth_element.h>
#include <TAD.h>
#include <ShapeUtils.h>
#include <PointersManager.h>
#include <NativeOps.h>
#include <helpers/ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void fillUpElementKernel(void* outputBuffer, Nd4jLong* outputShapeInfo, void* inputBuffer, Nd4jLong* inputShapeInfo, Nd4jLong* pTadShape, Nd4jLong* pTadOffsets, Nd4jLong n) {
__shared__ T *z, *x;
__shared__ Nd4jLong bufferLength, arrLen;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuffer);
x = reinterpret_cast<T*>(inputBuffer);
arrLen = shape::length(pTadShape);
bufferLength = shape::length(outputShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index for the grid-stride loop
const auto step = gridDim.x * blockDim.x;
for (int t = tid; t < bufferLength; t += step) {
auto tX = x + pTadOffsets[t];
z[shape::getIndexOffset(t, outputShapeInfo, bufferLength)] = tX[shape::getIndexOffset(n, pTadShape, arrLen)]; //tX];
}
}
template <typename T>
void nthElementFunctor_(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
NDArray::prepareSpecialUse({output}, {input});
NDArray sortedVals(*input);
Nd4jPointer params[2];
params[0] = context;
params[1] = context->getCudaStream();
if (input->isVector()) {
NativeOps ops;
ops.sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse);
cudaMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), cudaMemcpyDeviceToDevice);
}
else { // rank greater than 1
std::vector<int> lastDims({input->rankOf() - 1});// = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(sortedVals.getShapeInfo(), lastDims);
auto pTadShape = packX.specialShapeInfo();
auto pTadShapeH = packX.primaryShapeInfo();
auto pTadOffsets = packX.specialOffsets();
// auto pLastDimData = (int*) manager.replicatePointer(lastDims.data(), lastDims.size() * sizeof(int));
NativeOps ops;
ops.sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse);
// manager.synchronize();
sortedVals.tickWriteDevice();
sortedVals.syncToHost();
sortedVals.printIndexedBuffer("Hello");
sortedVals.printBuffer("Hello line");
auto stream = context->getCudaStream();
fillUpElementKernel<T><<<32, 64, 1024, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n);
}
NDArray::registerSpecialUse({output}, {input});
}
void nthElementFunctor(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void nthElementFunctor_, (nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse), LIBND4J_TYPES);
}
}
}
|
3b9e9f814d8894d94cbd82ffa226186840f81812.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMode.cu"
#else
THC_API void THCTensor_(calculateMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position) {
THAssert(THCTensor_(isContiguous)(state, input));
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
real *data = THCTensor_(data)(state, input);
for (int i = 0; i < THLongStorage_size(position); ++i) {
data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i);
}
int64_t nElement = THCTensor_(size)(state, input, THCTensor_(nDimension)(state, input) - 1);
THCThrustAllocator thrustAlloc(state);
// Wrap input data, sortBuffer, in Thrust device vectors
thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data);
thrust::device_vector<real> iter(vecPtr, vecPtr + nElement);
thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
// Fill sortBuffer with [0, 1, 2, ... nElement - 1]
thrust::sequence(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
seq.begin(), seq.end());
// Sort the input data. The original indices of the data are stored in seq
thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfLess()
#endif
);
// Count # of unique elements via an inner product between adjacent elements.
// Add 1 if two neighboring element are not equal.
int unique = 1 + thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
ThrustHalfNotEqualTo()
#else
thrust::not_equal_to<real>()
#endif
);
// Count frequency of each element
thrust::device_vector<real> keys(unique);
thrust::device_vector<int> counts(unique);
thrust::reduce_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(),
thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfEqualTo()
#endif
);
// Find index of maximum count
thrust::device_vector<int>::iterator it = thrust::max_element(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
counts.begin(), counts.end());
real mode = keys[it - counts.begin()];
// Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
thrust::device_vector<real>::iterator positionIter = thrust::find_if(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
thrust::device_vector<real>::iterator positionIter = thrust::find(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), mode);
#endif
THAssert(positionIter != iter.end());
int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()];
// Place mode, index in output
ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
for (int i = 0; i < THLongStorage_size(position); ++i) {
int64_t pos = THLongStorage_data(position)[i];
valuesOffset += THCTensor_(stride)(state, values, i) * pos;
indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos;
}
THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// this probably could be a loop, not a recursive algorithm
THC_API void THCTensor_(dimApplyMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position,
int curDim) {
int64_t ndim = THCTensor_(nDimension)(state, input);
// Because we have transposed the Tensor, the data for the dimension we are mode'ing along
// is always in the innermost dimension
if (curDim == ndim - 1) {
THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
} else {
// Loop through the values and recurse
for (int i = 0; i < THCTensor_(size)(state, input, curDim); ++i) {
position->data[curDim] = i;
THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
}
}
}
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
THC_API void THCTensor_(mode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
int dimension,
int keepdim) {
THLongStorage *dim;
THCTensor *transposed, *contiguous, *valuesTransposed;
THLongStorage *position;
THCudaLongStorage *sortBuffer;
THCudaLongTensor *indicesTransposed;
int64_t ndim, sliceSize, slices;
THAssert(THCTensor_(checkGPU)(state, 1, values));
// Verify they are asking for a valid dimension
ndim = THCTensor_(nDimension)(state, input);
THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension out of bounds");
sliceSize = THCTensor_(size)(state, input, dimension);
slices = THCTensor_(nElement)(state, input) / sliceSize;
// Resize output value, index Tensors to appropriate sizes (i.e. the same as
// the input Tensor, except at dim=dimension, the size is 1)
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, values, ndim, dimension, keepdim);
TensorUtils<THCudaLongTensor>::preserveReduceDimSemantics(
state, indices, ndim, dimension, keepdim);
dim = THCTensor_(newSizeOf)(state, input);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, values, dim, NULL);
THCudaLongTensor_resize(state, indices, dim, NULL);
THLongStorage_free(dim);
// If sliceSize is 1, copy input to values and set indices
if (sliceSize == 1) {
THCTensor_(copy)(state, values, input);
THCudaLongTensor_fill(state, indices, TH_INDEX_BASE);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
return;
}
// Requirements for fused kernel implementation:
//
// 1. sliceSize <= 2 * max threads per block
// 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
// a kernel launch
// 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
if (sliceSize <= MAX_BLOCK_SIZE &&
slices <= MAX_GRID_SIZE &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, input)) {
// Beginning our optimized implementation. First thing we want to do is to transpose
// the input Tensor along the sort dimension, and then make it contiguous
transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);
// Set-up TensorInfo structs for passing to kernel
TensorInfo<real, unsigned int> tiValues = getTensorInfo<THCTensor, unsigned int>(state, valuesTransposed);
TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<THCudaLongTensor, unsigned int>(state, indicesTransposed);
// The number of blocks is the number of slices that we need to calculate the mode for. Each block
// is responsible for computing a single mode
dim3 grid;
THC_getGridFromTiles(slices, grid);
// The blocksize is two elements per thread, rounded up to the nearest power of 2
int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Macro that calls kernel --> note that we set the block dimensions here, and
// the amount of shared memory
#define HANDLE_MODE(SIZE) \
{ \
dim3 blockSize(SIZE / 2); \
\
int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
hipLaunchKernelGGL(( computeMode<real, SIZE>) \
, dim3(grid), dim3(blockSize), memsize, THCState_getCurrentStream(state), \
THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
}
// Tradeoff between compilation time and the number of specializations. Ideally we would have
// one HANDLE_MODE for each power of 2
switch(ceilPowerOf2) {
case 2048:
HANDLE_MODE(2048)
break;
case 1024:
case 512:
case 256:
HANDLE_MODE(1024)
break;
case 128:
case 64:
HANDLE_MODE(128)
break;
case 32:
case 16:
case 8:
case 4:
case 2:
HANDLE_MODE(32)
break;
case 1:
default:
assert(false);
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, transposed);
THCTensor_(free)(state, contiguous);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
} else {
// Beginning our naive implementation: We don't want to mutate the input Tensor, but
// we need to be able to sort the inputs along the dimension in order to calculate the
// mode. Additionally, its ideal if the data along the dimension is contiguous. So
// we transpose the dimension with the innermost dimension and make a new contiguous
// version that we can use.
transposed = THCTensor_(newClone)(state, input);
THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
THCTensor_(free)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);
// Position is a Storage that will store the dimension values we are processing
position = THLongStorage_newWithSize(ndim - 1);
// Sort Buffer is a Storage that will be used in the internal sort required to calculate
// the mode efficiently
sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);
// Call mode
THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);
THCTensor_(free)(state, contiguous);
THLongStorage_free(position);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
THCudaLongStorage_free(state, sortBuffer);
}
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
| 3b9e9f814d8894d94cbd82ffa226186840f81812.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMode.cu"
#else
THC_API void THCTensor_(calculateMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position) {
THAssert(THCTensor_(isContiguous)(state, input));
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
real *data = THCTensor_(data)(state, input);
for (int i = 0; i < THLongStorage_size(position); ++i) {
data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i);
}
int64_t nElement = THCTensor_(size)(state, input, THCTensor_(nDimension)(state, input) - 1);
THCThrustAllocator thrustAlloc(state);
// Wrap input data, sortBuffer, in Thrust device vectors
thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data);
thrust::device_vector<real> iter(vecPtr, vecPtr + nElement);
thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
// Fill sortBuffer with [0, 1, 2, ... nElement - 1]
thrust::sequence(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
seq.begin(), seq.end());
// Sort the input data. The original indices of the data are stored in seq
thrust::sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfLess()
#endif
);
// Count # of unique elements via an inner product between adjacent elements.
// Add 1 if two neighboring element are not equal.
int unique = 1 + thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
ThrustHalfNotEqualTo()
#else
thrust::not_equal_to<real>()
#endif
);
// Count frequency of each element
thrust::device_vector<real> keys(unique);
thrust::device_vector<int> counts(unique);
thrust::reduce_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(),
thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfEqualTo()
#endif
);
// Find index of maximum count
thrust::device_vector<int>::iterator it = thrust::max_element(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
counts.begin(), counts.end());
real mode = keys[it - counts.begin()];
// Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
thrust::device_vector<real>::iterator positionIter = thrust::find_if(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
thrust::device_vector<real>::iterator positionIter = thrust::find(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
thrust::device,
#endif
iter.begin(), iter.end(), mode);
#endif
THAssert(positionIter != iter.end());
int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()];
// Place mode, index in output
ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
for (int i = 0; i < THLongStorage_size(position); ++i) {
int64_t pos = THLongStorage_data(position)[i];
valuesOffset += THCTensor_(stride)(state, values, i) * pos;
indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos;
}
THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// this probably could be a loop, not a recursive algorithm
THC_API void THCTensor_(dimApplyMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position,
int curDim) {
int64_t ndim = THCTensor_(nDimension)(state, input);
// Because we have transposed the Tensor, the data for the dimension we are mode'ing along
// is always in the innermost dimension
if (curDim == ndim - 1) {
THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
} else {
// Loop through the values and recurse
for (int i = 0; i < THCTensor_(size)(state, input, curDim); ++i) {
position->data[curDim] = i;
THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
}
}
}
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
THC_API void THCTensor_(mode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
int dimension,
int keepdim) {
THLongStorage *dim;
THCTensor *transposed, *contiguous, *valuesTransposed;
THLongStorage *position;
THCudaLongStorage *sortBuffer;
THCudaLongTensor *indicesTransposed;
int64_t ndim, sliceSize, slices;
THAssert(THCTensor_(checkGPU)(state, 1, values));
// Verify they are asking for a valid dimension
ndim = THCTensor_(nDimension)(state, input);
THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension out of bounds");
sliceSize = THCTensor_(size)(state, input, dimension);
slices = THCTensor_(nElement)(state, input) / sliceSize;
// Resize output value, index Tensors to appropriate sizes (i.e. the same as
// the input Tensor, except at dim=dimension, the size is 1)
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, values, ndim, dimension, keepdim);
TensorUtils<THCudaLongTensor>::preserveReduceDimSemantics(
state, indices, ndim, dimension, keepdim);
dim = THCTensor_(newSizeOf)(state, input);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, values, dim, NULL);
THCudaLongTensor_resize(state, indices, dim, NULL);
THLongStorage_free(dim);
// If sliceSize is 1, copy input to values and set indices
if (sliceSize == 1) {
THCTensor_(copy)(state, values, input);
THCudaLongTensor_fill(state, indices, TH_INDEX_BASE);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
return;
}
// Requirements for fused kernel implementation:
//
// 1. sliceSize <= 2 * max threads per block
// 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
// a kernel launch
// 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
if (sliceSize <= MAX_BLOCK_SIZE &&
slices <= MAX_GRID_SIZE &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, input)) {
// Beginning our optimized implementation. First thing we want to do is to transpose
// the input Tensor along the sort dimension, and then make it contiguous
transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);
// Set-up TensorInfo structs for passing to kernel
TensorInfo<real, unsigned int> tiValues = getTensorInfo<THCTensor, unsigned int>(state, valuesTransposed);
TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<THCudaLongTensor, unsigned int>(state, indicesTransposed);
// The number of blocks is the number of slices that we need to calculate the mode for. Each block
// is responsible for computing a single mode
dim3 grid;
THC_getGridFromTiles(slices, grid);
// The blocksize is two elements per thread, rounded up to the nearest power of 2
int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Macro that calls kernel --> note that we set the block dimensions here, and
// the amount of shared memory
#define HANDLE_MODE(SIZE) \
{ \
dim3 blockSize(SIZE / 2); \
\
int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
computeMode<real, SIZE> \
<<<grid, blockSize, memsize, THCState_getCurrentStream(state)>>>( \
THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
}
// Tradeoff between compilation time and the number of specializations. Ideally we would have
// one HANDLE_MODE for each power of 2
switch(ceilPowerOf2) {
case 2048:
HANDLE_MODE(2048)
break;
case 1024:
case 512:
case 256:
HANDLE_MODE(1024)
break;
case 128:
case 64:
HANDLE_MODE(128)
break;
case 32:
case 16:
case 8:
case 4:
case 2:
HANDLE_MODE(32)
break;
case 1:
default:
assert(false);
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, transposed);
THCTensor_(free)(state, contiguous);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
} else {
// Beginning our naive implementation: We don't want to mutate the input Tensor, but
// we need to be able to sort the inputs along the dimension in order to calculate the
// mode. Additionally, its ideal if the data along the dimension is contiguous. So
// we transpose the dimension with the innermost dimension and make a new contiguous
// version that we can use.
transposed = THCTensor_(newClone)(state, input);
THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
THCTensor_(free)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);
// Position is a Storage that will store the dimension values we are processing
position = THLongStorage_newWithSize(ndim - 1);
// Sort Buffer is a Storage that will be used in the internal sort required to calculate
// the mode efficiently
sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);
// Call mode
THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);
THCTensor_(free)(state, contiguous);
THLongStorage_free(position);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
THCudaLongStorage_free(state, sortBuffer);
}
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
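// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file; the guard macro and the
// helper name below are invented). The HANDLE_MODE dispatch above rounds
// sliceSize up to the next power of two via THC's nextHighestPowerOf2 so that
// one block can cover a whole slice at two elements per thread. The usual
// bit-twiddling behind that rounding looks like this; it stays behind a guard
// because this generic file appears to be re-included once per scalar type.
#ifdef THC_TENSOR_MODE_EXAMPLE
static inline int64_t exampleNextHighestPowerOf2(int64_t n) {
  // Smear the highest set bit into every lower position, then add one.
  n--;
  n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
  n |= n >> 8;  n |= n >> 16; n |= n >> 32;
  return n + 1;   // e.g. 300 -> 512
}
#endif
// A sliceSize of 300 therefore rounds to 512, which the switch above maps to
// HANDLE_MODE(1024): a 512-thread block handling up to 1024 elements.
// ---------------------------------------------------------------------------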
|
7fc0d94015eb8bcb180afb3f92e77aa6b6990ddc.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
// CUDA runtime
#include <hip/hip_runtime.h>
#include "bloomfilter.h"
using namespace std;
//const int bf_size = 2 * 1024 * 1024;
__global__ void cudaSetBitArray(unsigned char *filter, long long *bitArray)
{
int tid = blockIdx.x;
filter[bitArray[tid] >> 3] |= (1 << (bitArray[tid] & 7));
}
__global__ void cudaLookBitArray(unsigned char *filter, long long *bitArray, int *res)
{
int tid = blockIdx.x;
if(!(filter[bitArray[tid] >> 3] & (1 << (bitArray[tid] & 7))))
*res = 0;
}
__global__ void cudaLookFilters(unsigned char **filters, long long *bitArray, unsigned char *ans)
{
int nFilter = blockIdx.x;
int nHash = threadIdx.x;
if(!(filters[nFilter][bitArray[nHash] >> 3] & (1 << (bitArray[nHash] & 7))))
ans[nFilter] = 0;
}
__global__ void cudaMergeFilter(unsigned char *dstFilter, unsigned char *srcFilter)
{
int idx = blockIdx.x * 1024 + threadIdx.x;
dstFilter[idx] |= srcFilter[idx];
}
Bloomfilter::Bloomfilter(long long size, int numHash,long long (**hash)(const char *))
{
this->size = size;
this->numHash = numHash;
this->hash = (long long (**)(const char *))malloc(sizeof(long long (*)(const char *)) * numHash);
for(int i=0;i<numHash;i++)
this->hash[i] = hash[i];
error_handling( hipMalloc((void **)&filter, size / 8 + 1) );
error_handling( hipMemset((void *)filter, 0, size / 8 + 1) );
error_handling( hipMalloc((void **)&cudaBitArray, sizeof(long long) * numHash) );
error_handling( hipMalloc((void **)&cudaRes, sizeof(int)) );
}
Bloomfilter::~Bloomfilter()
{
hipFree(cudaBitArray);
hipFree(filter);
hipFree(cudaRes);
}
void Bloomfilter::insert(const char *data)
{
long long *bitArray;
getBitArray(bitArray, data);
hipLaunchKernelGGL(( cudaSetBitArray), dim3(numHash),dim3(1), 0, 0, filter, cudaBitArray);
}
bool Bloomfilter::lookup(const char *data)
{
long long *bitArray;
getBitArray(bitArray, data);
return lookBitArray(bitArray);
}
unsigned char *Bloomfilter::getFilter()
{
return filter;
}
void Bloomfilter::copyFilter(unsigned char *hostFilter)
{
return error_handling( hipMemcpy((void *)hostFilter, (const void *)filter, size / 8 + 1, hipMemcpyDeviceToHost) );
}
void Bloomfilter::setFilter(unsigned char *hostFilter)
{
return error_handling( hipMemcpy((void *)filter, (const void *)hostFilter, size / 8 + 1, hipMemcpyHostToDevice) );
}
void Bloomfilter::getBitArray(long long *&bitArray, const char *data)
{
long long array[20];
for(int i=0;i<numHash;i++)
{
array[i] = hash[i](data) % size;
}
error_handling( hipMemcpy((void *)cudaBitArray, (const void *)array, sizeof(long long) * numHash, hipMemcpyHostToDevice) );
bitArray = cudaBitArray;
}
bool Bloomfilter::lookBitArray(long long *bitArray)
{
int res = 1;
error_handling( hipMemcpy((void *)cudaRes, (const void *)&res, sizeof(int), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( cudaLookBitArray), dim3(numHash),dim3(1), 0, 0, filter, bitArray, cudaRes);
error_handling( hipMemcpy((void *)&res, (const void *)cudaRes,sizeof(int), hipMemcpyDeviceToHost) );
return !!res;
}
void Bloomfilter::initFilters(unsigned char ***filters, unsigned int size)
{
error_handling( hipMalloc((void **)filters, size * sizeof(unsigned char *)) );
}
void Bloomfilter::insertFilters(unsigned char **filters, unsigned int idx)
{
//cout << filters << endl;
//cout << filters + idx << endl;
error_handling( hipMemcpy((void **)(filters + idx), (const void *)&filter, sizeof(unsigned char *), hipMemcpyHostToDevice) );
}
void Bloomfilter::initAnswer(unsigned char **ans, unsigned int size)
{
error_handling( hipMalloc((void **)ans, size) );
}
void Bloomfilter::setAnswer(unsigned char *ans, unsigned int size)
{
error_handling( hipMemset((void *)ans, 1, size) );
}
void Bloomfilter::lookFilters(unsigned char **filters, unsigned char *cuda_ans, long long *bitArray, unsigned char *ans, unsigned int size)
{
setAnswer(cuda_ans, size);
dim3 block(size);
dim3 thread(11);
hipLaunchKernelGGL(( cudaLookFilters), dim3(block), dim3(thread), 0, 0, filters, bitArray, cuda_ans);
error_handling( hipMemcpy((void *)ans, (const void *)cuda_ans, size, hipMemcpyDeviceToHost) );
}
void Bloomfilter::zeroFilter()
{
error_handling( hipMemset((void *)filter, 0, size / 8 + 1) );
}
void Bloomfilter::mergeFilter(unsigned char *filter)
{
dim3 block(1024*2);
dim3 thread(1024);
hipLaunchKernelGGL(( cudaMergeFilter), dim3(block), dim3(thread), 0, 0, this->filter, filter);
}
void Bloomfilter::error_handling(hipError_t n)
{
if(n)
{
printf("Error! %d\n",n);
exit(0);
}
}
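// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): it shows how this class
// is meant to be driven -- construct with a bit count and an array of hash
// function pointers, then insert()/lookup() strings. The guard macro, the toy
// hash functions and the sizes are invented for illustration; the real class
// declaration lives in bloomfilter.h, which is not shown here.
#ifdef BLOOMFILTER_USAGE_EXAMPLE
static long long exampleHashA(const char *s) {
  unsigned long long h = 5381;                       // djb2-style toy hash
  while (*s) h = h * 33ULL + (unsigned char)*s++;
  return (long long)(h & 0x7fffffffffffffffULL);     // keep it non-negative for "% size"
}
static long long exampleHashB(const char *s) {
  unsigned long long h = 1469598103934665603ULL;     // FNV-1a-style toy hash
  while (*s) { h ^= (unsigned char)*s++; h *= 1099511628211ULL; }
  return (long long)(h & 0x7fffffffffffffffULL);
}
static void exampleBloomfilterUsage() {
  long long (*hashes[2])(const char *) = { exampleHashA, exampleHashB };
  Bloomfilter bf(8LL * 1024 * 1024, 2, hashes);      // 8M bits, 2 hash functions
  bf.insert("hello");
  bool hit  = bf.lookup("hello");                    // expected true
  bool miss = bf.lookup("absent-key");               // usually false; false positives are possible
  (void)hit; (void)miss;
}
#endif
// ---------------------------------------------------------------------------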
| 7fc0d94015eb8bcb180afb3f92e77aa6b6990ddc.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
// CUDA runtime
#include <cuda_runtime.h>
#include "bloomfilter.h"
using namespace std;
//const int bf_size = 2 * 1024 * 1024;
__global__ void cudaSetBitArray(unsigned char *filter, long long *bitArray)
{
int tid = blockIdx.x;
filter[bitArray[tid] >> 3] |= (1 << (bitArray[tid] & 7));
}
__global__ void cudaLookBitArray(unsigned char *filter, long long *bitArray, int *res)
{
int tid = blockIdx.x;
if(!(filter[bitArray[tid] >> 3] & (1 << (bitArray[tid] & 7))))
*res = 0;
}
__global__ void cudaLookFilters(unsigned char **filters, long long *bitArray, unsigned char *ans)
{
int nFilter = blockIdx.x;
int nHash = threadIdx.x;
if(!(filters[nFilter][bitArray[nHash] >> 3] & (1 << (bitArray[nHash] & 7))))
ans[nFilter] = 0;
}
__global__ void cudaMergeFilter(unsigned char *dstFilter, unsigned char *srcFilter)
{
int idx = blockIdx.x * 1024 + threadIdx.x;
dstFilter[idx] |= srcFilter[idx];
}
Bloomfilter::Bloomfilter(long long size, int numHash,long long (**hash)(const char *))
{
this->size = size;
this->numHash = numHash;
this->hash = (long long (**)(const char *))malloc(sizeof(long long (*)(const char *)) * numHash);
for(int i=0;i<numHash;i++)
this->hash[i] = hash[i];
error_handling( cudaMalloc((void **)&filter, size / 8 + 1) );
error_handling( cudaMemset((void *)filter, 0, size / 8 + 1) );
error_handling( cudaMalloc((void **)&cudaBitArray, sizeof(long long) * numHash) );
error_handling( cudaMalloc((void **)&cudaRes, sizeof(int)) );
}
Bloomfilter::~Bloomfilter()
{
cudaFree(cudaBitArray);
cudaFree(filter);
cudaFree(cudaRes);
}
void Bloomfilter::insert(const char *data)
{
long long *bitArray;
getBitArray(bitArray, data);
cudaSetBitArray<<<numHash,1>>>(filter, cudaBitArray);
}
bool Bloomfilter::lookup(const char *data)
{
long long *bitArray;
getBitArray(bitArray, data);
return lookBitArray(bitArray);
}
unsigned char *Bloomfilter::getFilter()
{
return filter;
}
void Bloomfilter::copyFilter(unsigned char *hostFilter)
{
return error_handling( cudaMemcpy((void *)hostFilter, (const void *)filter, size / 8 + 1, cudaMemcpyDeviceToHost) );
}
void Bloomfilter::setFilter(unsigned char *hostFilter)
{
return error_handling( cudaMemcpy((void *)filter, (const void *)hostFilter, size / 8 + 1, cudaMemcpyHostToDevice) );
}
void Bloomfilter::getBitArray(long long *&bitArray, const char *data)
{
long long array[20];
for(int i=0;i<numHash;i++)
{
array[i] = hash[i](data) % size;
}
error_handling( cudaMemcpy((void *)cudaBitArray, (const void *)array, sizeof(long long) * numHash, cudaMemcpyHostToDevice) );
bitArray = cudaBitArray;
}
bool Bloomfilter::lookBitArray(long long *bitArray)
{
int res = 1;
error_handling( cudaMemcpy((void *)cudaRes, (const void *)&res, sizeof(int), cudaMemcpyHostToDevice) );
cudaLookBitArray<<<numHash,1>>>(filter, bitArray, cudaRes);
error_handling( cudaMemcpy((void *)&res, (const void *)cudaRes,sizeof(int), cudaMemcpyDeviceToHost) );
return !!res;
}
void Bloomfilter::initFilters(unsigned char ***filters, unsigned int size)
{
error_handling( cudaMalloc((void **)filters, size * sizeof(unsigned char *)) );
}
void Bloomfilter::insertFilters(unsigned char **filters, unsigned int idx)
{
//cout << filters << endl;
//cout << filters + idx << endl;
error_handling( cudaMemcpy((void **)(filters + idx), (const void *)&filter, sizeof(unsigned char *), cudaMemcpyHostToDevice) );
}
void Bloomfilter::initAnswer(unsigned char **ans, unsigned int size)
{
error_handling( cudaMalloc((void **)ans, size) );
}
void Bloomfilter::setAnswer(unsigned char *ans, unsigned int size)
{
error_handling( cudaMemset((void *)ans, 1, size) );
}
void Bloomfilter::lookFilters(unsigned char **filters, unsigned char *cuda_ans, long long *bitArray, unsigned char *ans, unsigned int size)
{
setAnswer(cuda_ans, size);
dim3 block(size);
dim3 thread(11);
cudaLookFilters<<<block, thread>>>(filters, bitArray, cuda_ans);
error_handling( cudaMemcpy((void *)ans, (const void *)cuda_ans, size, cudaMemcpyDeviceToHost) );
}
void Bloomfilter::zeroFilter()
{
error_handling( cudaMemset((void *)filter, 0, size / 8 + 1) );
}
void Bloomfilter::mergeFilter(unsigned char *filter)
{
dim3 block(1024*2);
dim3 thread(1024);
cudaMergeFilter<<<block, thread>>>(this->filter, filter);
}
void Bloomfilter::error_handling(cudaError_t n)
{
if(n)
{
printf("Error! %d\n",n);
exit(0);
}
}
|
16fffad98251e180ee386ac6c62e7648cf6bf5c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <cutil_inline.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define USE_SMEM_ATOMICS 0
#if(!USE_SMEM_ATOMICS)
#define TAG_MASK ( (1U << (UINT_BITS - LOG2_WARP_SIZE)) - 1U )
inline __device__ void addByte(volatile uint *s_WarpHist, uint data, uint threadTag){
uint count;
do{
count = s_WarpHist[data] & TAG_MASK;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
}while(s_WarpHist[data] != count);
}
#else
#ifdef CUDA_NO_SM12_ATOMIC_INTRINSICS
#error Compilation target does not support shared-memory atomics
#endif
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag){
atomicAdd(s_WarpHist + data, 1);
}
#endif
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag){
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
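// Hedged illustration (not in the original source): with USE_SMEM_ATOMICS == 0,
// addByte() above packs two fields into each shared-memory counter -- the low
// (UINT_BITS - LOG2_WARP_SIZE) bits hold the bin count and the top bits hold the
// tag of the thread that last updated it. When several lanes of a warp hit the
// same bin, only one tagged write survives; the do/while loop retries until a
// thread reads back its own tag, so every increment lands exactly once. The
// guarded helper below merely decodes that layout (guard macro and names are
// invented).
#ifdef HISTOGRAM256_TAG_LAYOUT_EXAMPLE
static void exampleDecodeTaggedCounter(uint packed, uint *tag, uint *count) {
  *count = packed & TAG_MASK;                        // low bits: per-bin count
  *tag   = packed >> (UINT_BITS - LOG2_WARP_SIZE);   // top bits: last writer's tag
}
#endif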
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount){
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for(uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
//Cycle through the entire data set, update subhistograms for each warp
#ifndef __DEVICE_EMULATION__
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
#else
const uint tag = 0;
#endif
__syncthreads();
for(uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x)){
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE){
uint sum = 0;
for(uint i = 0; i < WARP_COUNT; i++)
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
//#define MERGE_THREADBLOCK_SIZE 256
#define MERGE_THREADBLOCK_SIZE 4
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
){
uint sum = 0;
for(uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for(uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1){
__syncthreads();
if(threadIdx.x < stride)
data[threadIdx.x] += data[threadIdx.x + stride];
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 1;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void){
hipMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)) ;
}
//Internal memory deallocation
extern "C" void closeHistogram256(void){
hipFree(d_PartialHistograms) ;
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
){
assert( byteCount % sizeof(uint) == 0 );
hipLaunchKernelGGL(( histogram256Kernel), dim3(PARTIAL_HISTOGRAM256_COUNT), dim3(HISTOGRAM256_THREADBLOCK_SIZE), 0, 0,
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
//cutilCheckMsg("histogram256Kernel() execution failed\n");
hipLaunchKernelGGL(( mergeHistogram256Kernel), dim3(HISTOGRAM256_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, 0,
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
//cutilCheckMsg("mergeHistogram256Kernel() execution failed\n");
}
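// ---------------------------------------------------------------------------
// Hedged host-side sketch (not part of the original file) of the intended call
// sequence for the extern "C" interface above: upload the input, initialize the
// partial-histogram buffer, run the two-pass histogram, then tear down. The
// guard macro and buffer names are invented and error checking is omitted.
#ifdef HISTOGRAM256_USAGE_EXAMPLE
static void exampleHistogram256Driver(const unsigned char *h_data, uint byteCount) {
  void *d_data = NULL;
  uint *d_histogram = NULL;
  hipMalloc(&d_data, byteCount);
  hipMalloc((void **)&d_histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint));
  hipMemcpy(d_data, h_data, byteCount, hipMemcpyHostToDevice);
  initHistogram256();                                // allocates d_PartialHistograms
  histogram256(d_histogram, d_data, byteCount);      // byteCount must be a multiple of sizeof(uint)
  closeHistogram256();
  uint h_histogram[HISTOGRAM256_BIN_COUNT];
  hipMemcpy(h_histogram, d_histogram, sizeof(h_histogram), hipMemcpyDeviceToHost);
  hipFree(d_histogram);
  hipFree(d_data);
}
#endif
// ---------------------------------------------------------------------------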
| 16fffad98251e180ee386ac6c62e7648cf6bf5c8.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <cutil_inline.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define USE_SMEM_ATOMICS 0
#if(!USE_SMEM_ATOMICS)
#define TAG_MASK ( (1U << (UINT_BITS - LOG2_WARP_SIZE)) - 1U )
inline __device__ void addByte(volatile uint *s_WarpHist, uint data, uint threadTag){
uint count;
do{
count = s_WarpHist[data] & TAG_MASK;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
}while(s_WarpHist[data] != count);
}
#else
#ifdef CUDA_NO_SM12_ATOMIC_INTRINSICS
#error Compilation target does not support shared-memory atomics
#endif
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag){
atomicAdd(s_WarpHist + data, 1);
}
#endif
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag){
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount){
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for(uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
//Cycle through the entire data set, update subhistograms for each warp
#ifndef __DEVICE_EMULATION__
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
#else
const uint tag = 0;
#endif
__syncthreads();
for(uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x)){
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for(uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE){
uint sum = 0;
for(uint i = 0; i < WARP_COUNT; i++)
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
//#define MERGE_THREADBLOCK_SIZE 256
#define MERGE_THREADBLOCK_SIZE 4
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
){
uint sum = 0;
for(uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for(uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1){
__syncthreads();
if(threadIdx.x < stride)
data[threadIdx.x] += data[threadIdx.x + stride];
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 1;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void){
cudaMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)) ;
}
//Internal memory deallocation
extern "C" void closeHistogram256(void){
cudaFree(d_PartialHistograms) ;
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
){
assert( byteCount % sizeof(uint) == 0 );
histogram256Kernel<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE>>>(
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
//cutilCheckMsg("histogram256Kernel() execution failed\n");
mergeHistogram256Kernel<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
//cutilCheckMsg("mergeHistogram256Kernel() execution failed\n");
}
|
65fe9bf46449030b949aad5dcff511fd0dcdc4e1.hip | // !!! This is a file automatically generated by hipify!!!
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//****************************************************************************
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
system("pause");
exit(1);
}
}
//Voy a usar como variables constantes para los kernels, la matriz input de imagen y la matriz de filtro
#define TAMFILTRO 5
__constant__ float d_const_filter[TAMFILTRO*TAMFILTRO];
__global__
void box_filter(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols, const int filterWidth)
{
const int2 thread_2D_pos = make_int2(blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.x * blockDim.x + threadIdx.x);
const int thread_1D_pos = thread_2D_pos.x * numCols + thread_2D_pos.y;
if (thread_2D_pos.x >= numRows || thread_2D_pos.y >= numCols)
return;
int contador = 0;
float result = 0.0f;
for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r){
for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c){
int image_r = thread_2D_pos.x + filter_r;
int image_c = thread_2D_pos.y + filter_c;
if ((image_c >= 0) && (image_c < numCols) && (image_r >= 0) && (image_r < numRows)){
float image_value = inputChannel[image_r * numCols + image_c];
float filter_value = d_const_filter[contador];
result += image_value * filter_value;
}
contador++;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
int id = thread_1D_pos;
redChannel[id] = inputImageRGBA[id].x;
greenChannel[id] = inputImageRGBA[id].y;
blueChannel[id] = inputImageRGBA[id].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Reservar memoria para el filtro en GPU: d_filter, la cual ya esta declarada
// Copiar el filtro (h_filter) a memoria global de la GPU (d_filter)
// checkCudaErrors(hipMalloc(&d_filter, sizeof(unsigned char) * filterWidth * filterWidth));
checkCudaErrors(hipMemcpyToSymbol(d_const_filter, h_filter, sizeof(float) * filterWidth * filterWidth));
// hipMemcpy(d_filter, h_filter, sizeof(unsigned char) * filterWidth * filterWidth, hipMemcpyHostToDevice);//Copiamos el d_filter a GPU.
}
void create_filter(float **h_filter, int *filterWidth){
const int KernelWidth = 5; //OJO CON EL TAMAÑO DEL FILTRO//
*filterWidth = KernelWidth;
//create and fill the filter we will convolve with
*h_filter = new float[KernelWidth * KernelWidth];
/*
//Filtro gaussiano: blur
const float KernelSigma = 2.;
float filterSum = 0.f; //for normalization
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) {
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma));
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) {
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) {
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] *= normalizationFactor;
}
}
*/
//Laplaciano 5x5
(*h_filter)[0] = 0; (*h_filter)[1] = 0; (*h_filter)[2] = -1.; (*h_filter)[3] = 0; (*h_filter)[4] = 0;
(*h_filter)[5] = 1.; (*h_filter)[6] = -1.; (*h_filter)[7] = -2.; (*h_filter)[8] = -1.; (*h_filter)[9] = 0;
(*h_filter)[10] = -1.;(*h_filter)[11] = -2.; (*h_filter)[12] = 17.; (*h_filter)[13] = -2.; (*h_filter)[14] = -1.;
(*h_filter)[15] = 1.; (*h_filter)[16] = -1.; (*h_filter)[17] = -2.; (*h_filter)[18] = -1.; (*h_filter)[19] = 0;
(*h_filter)[20] = 0; (*h_filter)[21] = 0; (*h_filter)[22] = -1.; (*h_filter)[23] = 0; (*h_filter)[24] = 0;
//TODO: crear los filtros segun necesidad
//NOTA: cuidado al establecer el tamaño del filtro a utilizar
}
void convolution(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redFiltered,
unsigned char *d_greenFiltered,
unsigned char *d_blueFiltered,
const int filterWidth)
{
//TODO: Calcular tamaños de bloque
const dim3 blockSize(16,16,1);
const dim3 gridSize((numCols / blockSize.x) + 1, (numRows / blockSize.y) + 1, 1);
//TODO: Lanzar kernel para separar imagenes RGBA en diferentes colores
separateChannels << < gridSize, blockSize >> >(d_inputImageRGBA, numRows, numCols, d_redFiltered, d_greenFiltered, d_blueFiltered);
//TODO: Ejecutar convolución. Una por canal
box_filter << <gridSize, blockSize >> > (d_redFiltered, d_red, numRows, numCols, filterWidth);
box_filter << <gridSize, blockSize >> > (d_greenFiltered, d_green, numRows, numCols, filterWidth);
box_filter << <gridSize, blockSize >> > (d_blueFiltered, d_blue, numRows, numCols, filterWidth);
// Recombining the results.
//recombineChannels << <gridSize, blockSize >> >(d_redFiltered, d_greenFiltered, d_blueFiltered, d_outputImageRGBA, numRows, numCols);
recombineChannels << <gridSize, blockSize >> >(d_red, d_green, d_blue, d_outputImageRGBA, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//system("pause");
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
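// ---------------------------------------------------------------------------
// Hedged driver sketch (not part of the original file). The real project
// presumably wires these calls from a separate host/main source; the buffer
// names, sizes and guard macro below are assumptions, and error checking is
// omitted for brevity.
#ifdef CONVOLUTION_USAGE_EXAMPLE
static void exampleConvolutionDriver(const uchar4 *h_image, size_t numRows, size_t numCols) {
  const size_t numPixels = numRows * numCols;
  float *h_filter = NULL;
  int filterWidth = 0;
  create_filter(&h_filter, &filterWidth);            // fills the 5x5 Laplacian
  // Caller-owned device buffers: RGBA input/output plus one scratch buffer per
  // colour channel for the separated (pre-filter) image.
  uchar4 *d_in = NULL, *d_out = NULL;
  unsigned char *d_r = NULL, *d_g = NULL, *d_b = NULL;
  hipMalloc(&d_in, numPixels * sizeof(uchar4));
  hipMalloc(&d_out, numPixels * sizeof(uchar4));
  hipMalloc(&d_r, numPixels);
  hipMalloc(&d_g, numPixels);
  hipMalloc(&d_b, numPixels);
  hipMemcpy(d_in, h_image, numPixels * sizeof(uchar4), hipMemcpyHostToDevice);
  // Copies the filter to constant memory and allocates d_red/d_green/d_blue.
  allocateMemoryAndCopyToGPU(numRows, numCols, h_filter, filterWidth);
  convolution(h_image, d_in, d_out, numRows, numCols, d_r, d_g, d_b, filterWidth);
  cleanup();                                         // frees d_red/d_green/d_blue
  hipFree(d_in); hipFree(d_out); hipFree(d_r); hipFree(d_g); hipFree(d_b);
  delete[] h_filter;                                 // create_filter used new[]
}
#endif
// ---------------------------------------------------------------------------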
| 65fe9bf46449030b949aad5dcff511fd0dcdc4e1.cu | //****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//****************************************************************************
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
system("pause");
exit(1);
}
}
//Voy a usar como variables constantes para los kernels, la matriz input de imagen y la matriz de filtro
#define TAMFILTRO 5
__constant__ float d_const_filter[TAMFILTRO*TAMFILTRO];
__global__
void box_filter(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols, const int filterWidth)
{
const int2 thread_2D_pos = make_int2(blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.x * blockDim.x + threadIdx.x);
const int thread_1D_pos = thread_2D_pos.x * numCols + thread_2D_pos.y;
if (thread_2D_pos.x >= numRows || thread_2D_pos.y >= numCols)
return;
int contador = 0;
float result = 0.0f;
for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r){
for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c){
int image_r = thread_2D_pos.x + filter_r;
int image_c = thread_2D_pos.y + filter_c;
if ((image_c >= 0) && (image_c < numCols) && (image_r >= 0) && (image_r < numRows)){
float image_value = inputChannel[image_r * numCols + image_c];
float filter_value = d_const_filter[contador];
result += image_value * filter_value;
}
contador++;
}
}
outputChannel[thread_1D_pos] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
int id = thread_1D_pos;
redChannel[id] = inputImageRGBA[id].x;
greenChannel[id] = inputImageRGBA[id].y;
blueChannel[id] = inputImageRGBA[id].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Reservar memoria para el filtro en GPU: d_filter, la cual ya esta declarada
// Copiar el filtro (h_filter) a memoria global de la GPU (d_filter)
// checkCudaErrors(cudaMalloc(&d_filter, sizeof(unsigned char) * filterWidth * filterWidth));
checkCudaErrors(cudaMemcpyToSymbol(d_const_filter, h_filter, sizeof(float) * filterWidth * filterWidth));
// cudaMemcpy(d_filter, h_filter, sizeof(unsigned char) * filterWidth * filterWidth, cudaMemcpyHostToDevice);//Copiamos el d_filter a GPU.
}
void create_filter(float **h_filter, int *filterWidth){
const int KernelWidth = 5; //OJO CON EL TAMAÑO DEL FILTRO//
*filterWidth = KernelWidth;
//create and fill the filter we will convolve with
*h_filter = new float[KernelWidth * KernelWidth];
/*
//Filtro gaussiano: blur
const float KernelSigma = 2.;
float filterSum = 0.f; //for normalization
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) {
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) {
float filterValue = expf( -(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma));
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r) {
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) {
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] *= normalizationFactor;
}
}
*/
//Laplaciano 5x5
(*h_filter)[0] = 0; (*h_filter)[1] = 0; (*h_filter)[2] = -1.; (*h_filter)[3] = 0; (*h_filter)[4] = 0;
(*h_filter)[5] = 1.; (*h_filter)[6] = -1.; (*h_filter)[7] = -2.; (*h_filter)[8] = -1.; (*h_filter)[9] = 0;
(*h_filter)[10] = -1.;(*h_filter)[11] = -2.; (*h_filter)[12] = 17.; (*h_filter)[13] = -2.; (*h_filter)[14] = -1.;
(*h_filter)[15] = 1.; (*h_filter)[16] = -1.; (*h_filter)[17] = -2.; (*h_filter)[18] = -1.; (*h_filter)[19] = 0;
(*h_filter)[20] = 0; (*h_filter)[21] = 0; (*h_filter)[22] = -1.; (*h_filter)[23] = 0; (*h_filter)[24] = 0;
//TODO: crear los filtros segun necesidad
//NOTA: cuidado al establecer el tamaño del filtro a utilizar
}
void convolution(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redFiltered,
unsigned char *d_greenFiltered,
unsigned char *d_blueFiltered,
const int filterWidth)
{
//TODO: Calcular tamaños de bloque
const dim3 blockSize(16,16,1);
const dim3 gridSize((numCols / blockSize.x) + 1, (numRows / blockSize.y) + 1, 1);
//TODO: Lanzar kernel para separar imagenes RGBA en diferentes colores
separateChannels << < gridSize, blockSize >> >(d_inputImageRGBA, numRows, numCols, d_redFiltered, d_greenFiltered, d_blueFiltered);
//TODO: Ejecutar convolución. Una por canal
box_filter << <gridSize, blockSize >> > (d_redFiltered, d_red, numRows, numCols, filterWidth);
box_filter << <gridSize, blockSize >> > (d_greenFiltered, d_green, numRows, numCols, filterWidth);
box_filter << <gridSize, blockSize >> > (d_blueFiltered, d_blue, numRows, numCols, filterWidth);
// Recombining the results.
//recombineChannels << <gridSize, blockSize >> >(d_redFiltered, d_greenFiltered, d_blueFiltered, d_outputImageRGBA, numRows, numCols);
recombineChannels << <gridSize, blockSize >> >(d_red, d_green, d_blue, d_outputImageRGBA, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//system("pause");
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
067fb673d5e53816161a5cc0d70643d77ad0bdec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <cassert>
#include <chrono>
#include <numeric>
#include "../common.h"
#include "../constant.h"
#include "../device.h"
#include "../logging.h"
#include "../run_config.h"
#include "../timer.h"
#include "cuda_random_states.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
__global__ void init_random_states(hiprandState_t *states, size_t num,
unsigned long seed) {
size_t threadId = threadIdx.x + blockIdx.x * blockDim.x;
if (threadId < num) {
/** Using different seed & constant sequence 0 can reduce memory
* consumption by 800M
 * https://docs.nvidia.com/cuda/curand/device-api-overview.html#performance-notes
*/
hiprand_init(seed+threadId, 0, 0, &states[threadId]);
}
}
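// Hedged sketch (not part of the original file) of how a sampling kernel
// typically consumes these per-thread states: load the state into a register
// copy, draw from it, and write it back so the sequence keeps advancing across
// launches. The kernel name, guard macro and output layout are invented.
#ifdef GPU_RANDOM_STATES_EXAMPLE
__global__ void example_draw_uniform(hiprandState_t *states, float *out, size_t num) {
  size_t threadId = threadIdx.x + blockIdx.x * blockDim.x;
  if (threadId < num) {
    hiprandState_t local = states[threadId];   // work on a register copy
    out[threadId] = hiprand_uniform(&local);   // one draw in (0, 1]
    states[threadId] = local;                  // persist the advanced state
  }
}
#endif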
size_t PredictRandomWalkMaxThreads(size_t num_nodes, size_t num_random_walk) {
size_t block_x = Constant::kCudaBlockSize;
size_t block_y = 1;
while (block_x >= 2 * num_random_walk) {
block_x /= 2;
block_y *= 2;
}
size_t grid_x = RoundUpDiv(num_nodes, block_y);
return grid_x * block_x * block_y;
}
} // namespace
GPURandomStates::GPURandomStates(SampleType sample_type,
const std::vector<size_t> &fanout,
const size_t batch_size, Context ctx) {
_ctx = ctx;
auto device = Device::Get(_ctx);
switch (sample_type) {
case kKHop0:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size() - 1);
break;
case kKHop1:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size());
_num_states = Min(_num_states, Constant::kKHop1MaxThreads);
break;
case kWeightedKHop:
case kWeightedKHopPrefix:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size());
_num_states = Min(_num_states, Constant::kWeightedKHopMaxThreads);
break;
case kRandomWalk:
_num_states = PredictRandomWalkMaxThreads(
PredictNumNodes(batch_size, fanout, fanout.size() - 1),
RunConfig::num_random_walk);
break;
case kKHop2:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size() - 1);
break;
case kWeightedKHopHashDedup:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size() - 1);
break;
default:
CHECK(0);
}
_states = static_cast<hiprandState_t *>(
device->AllocDataSpace(_ctx, sizeof(hiprandState_t) * _num_states));
const dim3 grid(
RoundUpDiv(_num_states, static_cast<size_t>(Constant::kCudaBlockSize)));
const dim3 block(Constant::kCudaBlockSize);
unsigned long seed =
std::chrono::system_clock::now().time_since_epoch().count();
hipLaunchKernelGGL(( init_random_states), dim3(grid), dim3(block), 0, 0, _states, _num_states, seed);
}
GPURandomStates::~GPURandomStates() {
auto device = Device::Get(_ctx);
device->FreeDataSpace(_ctx, _states);
}
} // namespace cuda
} // namespace common
} // namespace samgraph
| 067fb673d5e53816161a5cc0d70643d77ad0bdec.cu | /*
* Copyright 2022 Institute of Parallel and Distributed Systems, Shanghai Jiao Tong University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <cassert>
#include <chrono>
#include <numeric>
#include "../common.h"
#include "../constant.h"
#include "../device.h"
#include "../logging.h"
#include "../run_config.h"
#include "../timer.h"
#include "cuda_random_states.h"
namespace samgraph {
namespace common {
namespace cuda {
namespace {
__global__ void init_random_states(curandState *states, size_t num,
unsigned long seed) {
size_t threadId = threadIdx.x + blockIdx.x * blockDim.x;
if (threadId < num) {
/** Using different seed & constant sequence 0 can reduce memory
* consumption by 800M
* https://docs.nvidia.com/cuda/curand/device-api-overview.html#performance-notes
*/
curand_init(seed+threadId, 0, 0, &states[threadId]);
}
}
size_t PredictRandomWalkMaxThreads(size_t num_nodes, size_t num_random_walk) {
size_t block_x = Constant::kCudaBlockSize;
size_t block_y = 1;
while (block_x >= 2 * num_random_walk) {
block_x /= 2;
block_y *= 2;
}
size_t grid_x = RoundUpDiv(num_nodes, block_y);
return grid_x * block_x * block_y;
}
} // namespace
GPURandomStates::GPURandomStates(SampleType sample_type,
const std::vector<size_t> &fanout,
const size_t batch_size, Context ctx) {
_ctx = ctx;
auto device = Device::Get(_ctx);
switch (sample_type) {
case kKHop0:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size() - 1);
break;
case kKHop1:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size());
_num_states = Min(_num_states, Constant::kKHop1MaxThreads);
break;
case kWeightedKHop:
case kWeightedKHopPrefix:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size());
_num_states = Min(_num_states, Constant::kWeightedKHopMaxThreads);
break;
case kRandomWalk:
_num_states = PredictRandomWalkMaxThreads(
PredictNumNodes(batch_size, fanout, fanout.size() - 1),
RunConfig::num_random_walk);
break;
case kKHop2:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size() - 1);
break;
case kWeightedKHopHashDedup:
_num_states = PredictNumNodes(batch_size, fanout, fanout.size() - 1);
break;
default:
CHECK(0);
}
_states = static_cast<curandState *>(
device->AllocDataSpace(_ctx, sizeof(curandState) * _num_states));
const dim3 grid(
RoundUpDiv(_num_states, static_cast<size_t>(Constant::kCudaBlockSize)));
const dim3 block(Constant::kCudaBlockSize);
unsigned long seed =
std::chrono::system_clock::now().time_since_epoch().count();
init_random_states<<<grid, block>>>(_states, _num_states, seed);
}
GPURandomStates::~GPURandomStates() {
auto device = Device::Get(_ctx);
device->FreeDataSpace(_ctx, _states);
}
} // namespace cuda
} // namespace common
} // namespace samgraph
|
5b73d92f914af8f2e34d739c61be63cde5504c97.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef USE_MKL
#define USE_MKL
#endif // USE_MKL
#include "common.hpp"
#include "cusolverDn.h"
#ifdef USE_COMPLEX
#ifdef USE_FLOAT
#define dtype hipComplex
#define btype float
#define hipsolverDnXpotrf_bufferSize hipsolverDnCpotrf_bufferSize
#define hipsolverDnXpotrf hipsolverDnCpotrf
#else // USE_DOUBLE
#define dtype hipDoubleComplex
#define btype double
#define hipsolverDnXpotrf_bufferSize hipsolverDnZpotrf_bufferSize
#define hipsolverDnXpotrf hipsolverDnZpotrf
#endif // USE_FLOAT
#else // USE_REAL
#ifdef USE_FLOAT
#define dtype float
#define hipsolverDnXpotrf_bufferSize hipsolverDnSpotrf_bufferSize
#define hipsolverDnXpotrf hipsolverDnSpotrf
#else // USE_DOUBLE
#define dtype double
#define hipsolverDnXpotrf_bufferSize hipsolverDnDpotrf_bufferSize
#define hipsolverDnXpotrf hipsolverDnDpotrf
#endif // USE_FLOAT
#define btype dtype
#endif // USE_COMPLEX
static const char *const lin_fmt = "%d,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E\n";
static int Nmin = 0, Nmax = 0, Nstep = 0, _samples = 0, device_ = 0, _devices = 0, lda = 0, Lwork = 0;
static dtype *Agpu = (dtype*)NULL, *Workspace = (dtype*)NULL, *Acpu = (dtype*)NULL;
static hipsolverDnHandle_t handle;
static double device_count()
{
const double go = omp_get_wtime();
const hipError_t error = hipGetDeviceCount(&_devices); const int lin = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case hipErrorNoDevice:
(void)fprintf(stderr, "[%s@%s:%d] NoDevice\n", __FUNCTION__, __FILE__, lin);
exit(error);
case hipErrorInsufficientDriver:
(void)fprintf(stderr, "[%s@%s:%d] InsufficientDriver\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
return (omp_get_wtime() - go);
}
static double set_device()
{
const double go = omp_get_wtime();
int device = 0;
(void)hipGetDevice(&device);
if (device != device_) {
const hipError_t error = hipSetDevice(device_); const int lin = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case hipErrorInvalidDevice:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevice\n", __FUNCTION__, __FILE__, lin);
exit(error);
case hipErrorDeviceAlreadyInUse:
(void)fprintf(stderr, "[%s@%s:%d] DeviceAlreadyInUse\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
}
return (omp_get_wtime() - go);
}
static double create_handle()
{
const double go = omp_get_wtime();
const cusolverStatus_t status = hipsolverDnCreate(&handle); const int lin = __LINE__;
switch (status) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin);
exit(status);
case CUSOLVER_STATUS_ALLOC_FAILED:
(void)fprintf(stderr, "[%s@%s:%d] ALLOC_FAILED\n", __FUNCTION__, __FILE__, lin);
exit(status);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin);
exit(status);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, status);
exit(status);
}
return (omp_get_wtime() - go);
}
static double alloc_gpu_mtx()
{
const double go = omp_get_wtime();
size_t pitch = 0;
const hipError_t err1 = hipMallocPitch(&Agpu, &pitch, Nmax * sizeof(dtype), Nmax); const int lin1 = __LINE__;
switch (err1) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case hipErrorMemoryAllocation:
(void)fprintf(stderr, "[%s@%s:%d] MemoryAllocation\n", __FUNCTION__, __FILE__, lin1);
exit(err1);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, err1);
exit(err1);
}
lda = int(pitch / sizeof(dtype));
#ifndef NDEBUG
(void)fprintf(stdout, "lda = %d\n", lda);
#endif // !NDEBUG
const double end = (omp_get_wtime() - go);
// don't time clearing the memory
const hipError_t err2 = hipMemset2D(Agpu, pitch, 0, pitch, Nmax); const int lin2 = __LINE__;
switch (err2) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case hipErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
case hipErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, err2);
exit(err2);
}
return end;
}
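// Hedged sketch (not part of the original benchmark): alloc_gpu_mtx() above
// relies on hipMallocPitch padding each line of Nmax elements to an aligned
// pitch and then reuses that pitch as the leading dimension, lda =
// pitch / sizeof(dtype), for every cusolver call. The guarded fragment below
// shows the same idiom for a plain double matrix; the function name and guard
// macro are invented.
#ifdef POTRF_PITCH_EXAMPLE
static int examplePitchedLda(int n, double **dA)
{
  size_t pitch = 0;
  // each line of n doubles is padded to `pitch` bytes
  if (hipMallocPitch((void**)dA, &pitch, n * sizeof(double), n) != hipSuccess)
    return -1;
  // column-major element (i,j) then lives at (*dA)[j * lda + i]
  return (int)(pitch / sizeof(double));
}
#endif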
static double find_lwork()
{
const double go = omp_get_wtime();
int LworkL = -1, maxLworkL = 0;
int LworkU = -1, maxLworkU = 0;
for (int n = Nmin; n <= Nmax; n += Nstep) {
const cusolverStatus_t stat1 = hipsolverDnXpotrf_bufferSize(handle, HIPBLAS_FILL_MODE_LOWER, n, Agpu, lda, &LworkL); const int lin1 = __LINE__;
switch (stat1) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
case CUSOLVER_STATUS_INVALID_VALUE:
(void)fprintf(stderr, "[%s@%s:%d] INVALID_VALUE\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
case CUSOLVER_STATUS_INTERNAL_ERROR:
(void)fprintf(stderr, "[%s@%s:%d] INTERNAL_ERROR\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, stat1);
exit(stat1);
}
if (LworkL > maxLworkL)
maxLworkL = LworkL;
const cusolverStatus_t stat2 = hipsolverDnXpotrf_bufferSize(handle, HIPBLAS_FILL_MODE_UPPER, n, Agpu, lda, &LworkU); const int lin2 = __LINE__;
switch (stat2) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
case CUSOLVER_STATUS_INVALID_VALUE:
(void)fprintf(stderr, "[%s@%s:%d] INVALID_VALUE\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
case CUSOLVER_STATUS_INTERNAL_ERROR:
(void)fprintf(stderr, "[%s@%s:%d] INTERNAL_ERROR\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, stat2);
exit(stat2);
}
if (LworkU > maxLworkU)
maxLworkU = LworkU;
}
Lwork = ((maxLworkL >= maxLworkU) ? maxLworkL : maxLworkU);
#ifndef NDEBUG
(void)fprintf(stdout, "Lwork = %d, LworkL = %d, LworkU = %d\n", Lwork, maxLworkL, maxLworkU);
#endif // !NDEBUG
return (omp_get_wtime() - go);
}
static double alloc_gpu_wrk()
{
const double go = omp_get_wtime();
if (Lwork > 0) {
const hipError_t err1 = hipMalloc(&Workspace, (Lwork + 1) * sizeof(dtype)); const int lin1 = __LINE__;
switch (err1) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case hipErrorMemoryAllocation:
(void)fprintf(stderr, "[%s@%s:%d] MemoryAllocation\n", __FUNCTION__, __FILE__, lin1);
exit(err1);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, err1);
exit(err1);
}
const hipError_t err2 = hipMemset(Workspace, 0, (Lwork + 1) * sizeof(dtype)); const int lin2 = __LINE__;
switch (err2) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case hipErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
case hipErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, err2);
exit(err2);
}
}
return (omp_get_wtime() - go);
}
static double alloc_cpu_mtx()
{
const double go = omp_get_wtime();
const size_t size = size_t(lda) * Nmax * sizeof(dtype);
const hipError_t error = hipHostMalloc(&Acpu, size); const int lin = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case hipErrorMemoryAllocation:
(void)fprintf(stderr, "[%s@%s:%d] MemoryAllocation\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
const double end = (omp_get_wtime() - go);
// don't time clearing the memory
(void)memset(Acpu, 0, size);
return end;
}
static double init_cpu_mtx()
{
const double go = omp_get_wtime();
static const int idist = 1;
int iseed[4] = { 0, 1, 2, 3 };
const int k = Nmax - 1;
int info = 0;
dtype *const wrk = (dtype*)calloc(3 * Nmax, sizeof(dtype)); const int lin1 = __LINE__;
if (!wrk) {
(void)fprintf(stderr, "[%s@%s:%d,%d] ", __FUNCTION__, __FILE__, lin1, errno);
perror("calloc");
exit(errno);
}
#ifdef USE_COMPLEX
// Diagonal
REAL_LAPACK(larnv)(&idist, iseed, &Nmax, (btype*)wrk);
// Acpu
CMPLX_LAPACK(laghe)(&Nmax, &k, (btype*)wrk, (MKL_Complex*)Acpu, &lda, iseed, (MKL_Complex*)(wrk + Nmax), &info); const int lin2 = __LINE__;
#else // USE_REAL
// Diagonal
REAL_LAPACK(larnv)(&idist, iseed, &Nmax, wrk);
// Acpu
REAL_LAPACK(lagsy)(&Nmax, &k, wrk, Acpu, &lda, iseed, wrk + Nmax, &info); const int lin2 = __LINE__;
#endif // USE_COMPLEX
if (info) {
(void)fprintf(stderr, "[%s@%s:%d] INFO = %d\n", __FUNCTION__, __FILE__, lin2, info);
exit(info);
}
free(wrk);
return (omp_get_wtime() - go);
}
static double copy_mtx_cpu2gpu(const int n)
{
const double go = omp_get_wtime();
if (n >= Nmin) {
if (n > Nmax) {
(void)fprintf(stderr, "[%s@%s] n == %d > Nmax == %d\n", __FUNCTION__, __FILE__, n, Nmax);
exit(n);
}
const size_t pitch = lda * sizeof(dtype);
const hipError_t error = hipMemcpy2D(Agpu, pitch, Acpu, pitch, n * sizeof(dtype), n, hipMemcpyHostToDevice); const int lin1 = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case hipErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin1);
exit(error);
case hipErrorInvalidPitchValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidPitchValue\n", __FUNCTION__, __FILE__, lin1);
exit(error);
case hipErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin1);
exit(error);
case hipErrorInvalidMemcpyDirection:
(void)fprintf(stderr, "[%s@%s:%d] InvalidMemcpyDirection\n", __FUNCTION__, __FILE__, lin1);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, error);
exit(error);
}
// just to be sure...
(void)hipDeviceSynchronize();
}
else {
(void)fprintf(stderr, "[%s@%s] n == %d < Nmin == %d\n", __FUNCTION__, __FILE__, n, Nmin);
exit(n);
}
return (omp_get_wtime() - go);
}
static double potrf_gpu(const bool upper, const int n)
{
const double go = omp_get_wtime();
if (n >= Nmin) {
if (n > Nmax) {
(void)fprintf(stderr, "[%s@%s] n == %d > Nmax == %d\n", __FUNCTION__, __FILE__, n, Nmax);
exit(n);
}
const hipblasFillMode_t uplo = (upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER);
const cusolverStatus_t status = hipsolverDnXpotrf(handle, uplo, n, Agpu, lda, Workspace, Lwork, (int*)(Workspace + Lwork)); const int lin1 = __LINE__;
int devInfo = 0;
(void)hipDeviceSynchronize();
const hipError_t error = hipMemcpy(&devInfo, Workspace + Lwork, sizeof(int), hipMemcpyDeviceToHost); const int lin2 = __LINE__;
(void)hipDeviceSynchronize();
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case hipErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin2);
exit(error);
case hipErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin2);
exit(error);
case hipErrorInvalidMemcpyDirection:
(void)fprintf(stderr, "[%s@%s:%d] InvalidMemcpyDirection\n", __FUNCTION__, __FILE__, lin2);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, error);
exit(error);
}
switch (status) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d,%d] SUCCESS\n", __FUNCTION__, __FILE__, lin1, devInfo);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d,%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
case CUSOLVER_STATUS_INVALID_VALUE:
(void)fprintf(stderr, "[%s@%s:%d,%d] INVALID_VALUE\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d,%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
case CUSOLVER_STATUS_INTERNAL_ERROR:
(void)fprintf(stderr, "[%s@%s:%d,%d] INTERNAL_ERROR\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
default:
(void)fprintf(stderr, "[%s@%s:%d,%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, devInfo, status);
exit(status);
}
if (devInfo) {
(void)fprintf(stderr, "[%s@%s:%d] INFO = %d\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(devInfo);
}
}
else {
(void)fprintf(stderr, "[%s@%s] n == %d < Nmin == %d\n", __FUNCTION__, __FILE__, n, Nmin);
exit(n);
}
return (omp_get_wtime() - go);
}
static double free_cpu_mtx()
{
const double go = omp_get_wtime();
if (Acpu) {
const hipError_t error = hipHostFree(Acpu); const int lin = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case hipErrorInitializationError:
(void)fprintf(stderr, "[%s@%s:%d] InitializationError\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
Acpu = (dtype*)NULL;
}
return (omp_get_wtime() - go);
}
static double free_gpu_wrk()
{
const double go = omp_get_wtime();
if (Workspace) {
const hipError_t error = hipFree(Workspace); const int lin = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case hipErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin);
exit(error);
case hipErrorInitializationError:
(void)fprintf(stderr, "[%s@%s:%d] InitializationError\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
Workspace = (dtype*)NULL;
}
return (omp_get_wtime() - go);
}
static double free_gpu_mtx()
{
const double go = omp_get_wtime();
const hipError_t error = hipFree(Agpu); const int lin = __LINE__;
switch (error) {
case hipSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case hipErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin);
exit(error);
case hipErrorInitializationError:
(void)fprintf(stderr, "[%s@%s:%d] InitializationError\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
Agpu = (dtype*)NULL;
return (omp_get_wtime() - go);
}
static double destroy_handle()
{
const double go = omp_get_wtime();
const cusolverStatus_t status = hipsolverDnDestroy(handle); const int lin = __LINE__;
switch (status) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin);
exit(status);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, status);
exit(status);
}
return (omp_get_wtime() - go);
}
int main(int argc, char* argv[])
{
if ((argc < 5) || (argc > 6)) {
(void)fprintf(stderr, "%s Nmin Nmax Nstep #samples [device#]\n", *argv);
return EXIT_FAILURE;
}
if ((Nmin = atoi(argv[1])) <= 0) {
(void)fprintf(stderr, "Nmin == %d <= 0\n", Nmin);
return EXIT_FAILURE;
}
if ((Nmax = atoi(argv[2])) <= 0) {
(void)fprintf(stderr, "Nmax == %d <= 0\n", Nmax);
return EXIT_FAILURE;
}
if (Nmax < Nmin) {
(void)fprintf(stderr, "Nmax == %d < Nmin == %d\n", Nmax, Nmin);
return EXIT_FAILURE;
}
if ((Nstep = atoi(argv[3])) <= 0) {
(void)fprintf(stderr, "Nstep == %d <= 0\n", Nstep);
return EXIT_FAILURE;
}
if ((_samples = atoi(argv[4])) <= 0) {
(void)fprintf(stderr, "#samples == %d <= 0\n", _samples);
return EXIT_FAILURE;
}
if ((argc == 6) && ((device_ = atoi(argv[5])) < 0)) {
(void)fprintf(stderr, "device# == %d < 0\n", device_);
return EXIT_FAILURE;
}
const char *const env_nthr = getenv("MKL_NUM_THREADS");
if (!env_nthr) {
(void)fprintf(stderr, "MKL_NUM_THREADS environment variable not set\n");
return EXIT_FAILURE;
}
const int mkl_nthr = atoi(env_nthr);
if (mkl_nthr <= 0) {
(void)fprintf(stderr, "MKL_NUM_THREADS = %d <= 0\n", mkl_nthr);
return EXIT_FAILURE;
}
const double resol = omp_get_wtick();
#ifndef NDEBUG
(void)fprintf(stdout, "[omp_get_wtick] %#.17E s\n", resol);
#endif // !NDEBUG
(void)device_count();
  if (device_ >= _devices) {
    (void)fprintf(stderr, "device# == %d >= #devices == %d\n", device_, _devices);
return EXIT_FAILURE;
}
(void)set_device();
(void)create_handle();
const double agpu_time = alloc_gpu_mtx();
const double lwrk_time = find_lwork();
double awrk_time = alloc_gpu_wrk();
// number of tests
int ntst = (Nmax - Nmin) + 1;
// at least one test
ntst = ((ntst < Nstep) ? 1 : (ntst / Nstep));
// add the average time to find Lwork
awrk_time += lwrk_time / (2 * ntst);
const double acpu_time = alloc_cpu_mtx();
const double init_time = init_cpu_mtx();
#ifndef NDEBUG
(void)fprintf(stdout, "[init_cpu_mtx] %#.17E s\n", init_time);
#endif // !NDEBUG
(void)fprintf(stdout, "\"N\",\"COPY_H2D_MIN_s\",\"COPY_H2D_AVG_s\",\"COPY_H2D_MAX_s\",\"LPOTRF_MIN_s\",\"LPOTRF_AVG_s\",\"LPOTRF_MAX_s\",\"UPOTRF_MIN_s\",\"UPOTRF_AVG_s\",\"UPOTRF_MAX_s\"\n");
(void)fflush(stdout);
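  // Benchmark loop: for every problem size n, each sample re-uploads the matrix and factorizes it
  // once with the lower and once with the upper triangle, tracking min/avg/max times per operation.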
for (int n = Nmin; n <= Nmax; n += Nstep) {
double Lcopy_times_min = INFINITY;
double Lcopy_times_max = -0.0;
double Lcopy_times_avg = -0.0;
double Ucopy_times_min = INFINITY;
double Ucopy_times_max = -0.0;
double Ucopy_times_avg = -0.0;
double Lpotrf_times_min = INFINITY;
double Lpotrf_times_max = -0.0;
double Lpotrf_times_avg = -0.0;
double Upotrf_times_min = INFINITY;
double Upotrf_times_max = -0.0;
double Upotrf_times_avg = -0.0;
for (int sample = 0; sample < _samples; ++sample) {
const double Lcopy_time = copy_mtx_cpu2gpu(n);
#ifndef NDEBUG
(void)fprintf(stdout, "[copy_mtx_cpu2gpu(%d),%d,L] %#.17E s\n", n, sample, Lcopy_time);
#endif // !NDEBUG
if (Lcopy_time < Lcopy_times_min)
Lcopy_times_min = Lcopy_time;
if (Lcopy_time > Lcopy_times_max)
Lcopy_times_max = Lcopy_time;
Lcopy_times_avg += Lcopy_time / _samples;
const double Lpotrf_time = potrf_gpu(false, n);
#ifndef NDEBUG
(void)fprintf(stdout, "[potrf_gpu(%d),%d,L] %#.17E s\n", n, sample, Lpotrf_time);
#endif // !NDEBUG
if (Lpotrf_time < Lpotrf_times_min)
Lpotrf_times_min = Lpotrf_time;
if (Lpotrf_time > Lpotrf_times_max)
Lpotrf_times_max = Lpotrf_time;
Lpotrf_times_avg += Lpotrf_time / _samples;
const double Ucopy_time = copy_mtx_cpu2gpu(n);
#ifndef NDEBUG
(void)fprintf(stdout, "[copy_mtx_cpu2gpu(%d),%d,U] %#.17E s\n", n, sample, Ucopy_time);
#endif // !NDEBUG
if (Ucopy_time < Ucopy_times_min)
Ucopy_times_min = Ucopy_time;
if (Ucopy_time > Ucopy_times_max)
Ucopy_times_max = Ucopy_time;
Ucopy_times_avg += Ucopy_time / _samples;
const double Upotrf_time = potrf_gpu(true, n);
#ifndef NDEBUG
(void)fprintf(stdout, "[potrf_gpu(%d),%d,U] %#.17E s\n", n, sample, Upotrf_time);
#endif // !NDEBUG
if (Upotrf_time < Upotrf_times_min)
Upotrf_times_min = Upotrf_time;
if (Upotrf_time > Upotrf_times_max)
Upotrf_times_max = Upotrf_time;
Upotrf_times_avg += Upotrf_time / _samples;
}
const double copy_times_min = ((Lcopy_times_min <= Ucopy_times_min) ? Lcopy_times_min : Ucopy_times_min);
const double copy_times_max = ((Lcopy_times_max >= Ucopy_times_max) ? Lcopy_times_max : Ucopy_times_max);
const double copy_times_avg = (Lcopy_times_avg + Ucopy_times_avg) / 2;
(void)fprintf(stdout, lin_fmt, n,
copy_times_min, copy_times_avg, copy_times_max,
Lpotrf_times_min, Lpotrf_times_avg, Lpotrf_times_max,
Upotrf_times_min, Upotrf_times_avg, Upotrf_times_max);
(void)fflush(stdout);
}
const double fcpu_time = free_cpu_mtx();
  const double fwrk_time = free_gpu_wrk();
  const double fgpu_time = free_gpu_mtx();
(void)destroy_handle();
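  // Trailer row: the negative sample count in the first column marks it as metadata
  // (timer resolution, MKL thread count, matrix init time, and the allocation/free times).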
(void)fprintf(stdout, lin_fmt, -_samples,
resol, double(mkl_nthr), init_time,
acpu_time, agpu_time, awrk_time,
fcpu_time, fgpu_time, fwrk_time);
(void)fflush(stdout);
return EXIT_SUCCESS;
}
| 5b73d92f914af8f2e34d739c61be63cde5504c97.cu | #ifndef USE_MKL
#define USE_MKL
#endif // USE_MKL
#include "common.hpp"
#include "cusolverDn.h"
#ifdef USE_COMPLEX
#ifdef USE_FLOAT
#define dtype cuComplex
#define btype float
#define cusolverDnXpotrf_bufferSize cusolverDnCpotrf_bufferSize
#define cusolverDnXpotrf cusolverDnCpotrf
#else // USE_DOUBLE
#define dtype cuDoubleComplex
#define btype double
#define cusolverDnXpotrf_bufferSize cusolverDnZpotrf_bufferSize
#define cusolverDnXpotrf cusolverDnZpotrf
#endif // USE_FLOAT
#else // USE_REAL
#ifdef USE_FLOAT
#define dtype float
#define cusolverDnXpotrf_bufferSize cusolverDnSpotrf_bufferSize
#define cusolverDnXpotrf cusolverDnSpotrf
#else // USE_DOUBLE
#define dtype double
#define cusolverDnXpotrf_bufferSize cusolverDnDpotrf_bufferSize
#define cusolverDnXpotrf cusolverDnDpotrf
#endif // USE_FLOAT
#define btype dtype
#endif // USE_COMPLEX
static const char *const lin_fmt = "%d,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E,%#.17E\n";
static int Nmin = 0, Nmax = 0, Nstep = 0, _samples = 0, device_ = 0, _devices = 0, lda = 0, Lwork = 0;
static dtype *Agpu = (dtype*)NULL, *Workspace = (dtype*)NULL, *Acpu = (dtype*)NULL;
static cusolverDnHandle_t handle;
static double device_count()
{
const double go = omp_get_wtime();
const cudaError_t error = cudaGetDeviceCount(&_devices); const int lin = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case cudaErrorNoDevice:
(void)fprintf(stderr, "[%s@%s:%d] NoDevice\n", __FUNCTION__, __FILE__, lin);
exit(error);
case cudaErrorInsufficientDriver:
(void)fprintf(stderr, "[%s@%s:%d] InsufficientDriver\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
return (omp_get_wtime() - go);
}
static double set_device()
{
const double go = omp_get_wtime();
int device = 0;
(void)cudaGetDevice(&device);
if (device != device_) {
const cudaError_t error = cudaSetDevice(device_); const int lin = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case cudaErrorInvalidDevice:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevice\n", __FUNCTION__, __FILE__, lin);
exit(error);
case cudaErrorDeviceAlreadyInUse:
(void)fprintf(stderr, "[%s@%s:%d] DeviceAlreadyInUse\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
}
return (omp_get_wtime() - go);
}
static double create_handle()
{
const double go = omp_get_wtime();
const cusolverStatus_t status = cusolverDnCreate(&handle); const int lin = __LINE__;
switch (status) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin);
exit(status);
case CUSOLVER_STATUS_ALLOC_FAILED:
(void)fprintf(stderr, "[%s@%s:%d] ALLOC_FAILED\n", __FUNCTION__, __FILE__, lin);
exit(status);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin);
exit(status);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, status);
exit(status);
}
return (omp_get_wtime() - go);
}
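// Allocate the device matrix with cudaMallocPitch; the returned pitch defines the leading
// dimension lda used everywhere else. Clearing the memory is excluded from the timing.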
static double alloc_gpu_mtx()
{
const double go = omp_get_wtime();
size_t pitch = 0;
const cudaError_t err1 = cudaMallocPitch(&Agpu, &pitch, Nmax * sizeof(dtype), Nmax); const int lin1 = __LINE__;
switch (err1) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case cudaErrorMemoryAllocation:
(void)fprintf(stderr, "[%s@%s:%d] MemoryAllocation\n", __FUNCTION__, __FILE__, lin1);
exit(err1);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, err1);
exit(err1);
}
lda = int(pitch / sizeof(dtype));
#ifndef NDEBUG
(void)fprintf(stdout, "lda = %d\n", lda);
#endif // !NDEBUG
const double end = (omp_get_wtime() - go);
// don't time clearing the memory
const cudaError_t err2 = cudaMemset2D(Agpu, pitch, 0, pitch, Nmax); const int lin2 = __LINE__;
switch (err2) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case cudaErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
case cudaErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, err2);
exit(err2);
}
return end;
}
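// Query the potrf workspace size for every test dimension (lower and upper triangle) and
// keep the maximum, so a single workspace buffer serves all benchmark runs.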
static double find_lwork()
{
const double go = omp_get_wtime();
int LworkL = -1, maxLworkL = 0;
int LworkU = -1, maxLworkU = 0;
for (int n = Nmin; n <= Nmax; n += Nstep) {
const cusolverStatus_t stat1 = cusolverDnXpotrf_bufferSize(handle, CUBLAS_FILL_MODE_LOWER, n, Agpu, lda, &LworkL); const int lin1 = __LINE__;
switch (stat1) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
case CUSOLVER_STATUS_INVALID_VALUE:
(void)fprintf(stderr, "[%s@%s:%d] INVALID_VALUE\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
case CUSOLVER_STATUS_INTERNAL_ERROR:
(void)fprintf(stderr, "[%s@%s:%d] INTERNAL_ERROR\n", __FUNCTION__, __FILE__, lin1);
exit(stat1);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, stat1);
exit(stat1);
}
if (LworkL > maxLworkL)
maxLworkL = LworkL;
const cusolverStatus_t stat2 = cusolverDnXpotrf_bufferSize(handle, CUBLAS_FILL_MODE_UPPER, n, Agpu, lda, &LworkU); const int lin2 = __LINE__;
switch (stat2) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
case CUSOLVER_STATUS_INVALID_VALUE:
(void)fprintf(stderr, "[%s@%s:%d] INVALID_VALUE\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
case CUSOLVER_STATUS_INTERNAL_ERROR:
(void)fprintf(stderr, "[%s@%s:%d] INTERNAL_ERROR\n", __FUNCTION__, __FILE__, lin2);
exit(stat2);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, stat2);
exit(stat2);
}
if (LworkU > maxLworkU)
maxLworkU = LworkU;
}
Lwork = ((maxLworkL >= maxLworkU) ? maxLworkL : maxLworkU);
#ifndef NDEBUG
(void)fprintf(stdout, "Lwork = %d, LworkL = %d, LworkU = %d\n", Lwork, maxLworkL, maxLworkU);
#endif // !NDEBUG
return (omp_get_wtime() - go);
}
static double alloc_gpu_wrk()
{
const double go = omp_get_wtime();
if (Lwork > 0) {
const cudaError_t err1 = cudaMalloc(&Workspace, (Lwork + 1) * sizeof(dtype)); const int lin1 = __LINE__;
switch (err1) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case cudaErrorMemoryAllocation:
(void)fprintf(stderr, "[%s@%s:%d] MemoryAllocation\n", __FUNCTION__, __FILE__, lin1);
exit(err1);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, err1);
exit(err1);
}
const cudaError_t err2 = cudaMemset(Workspace, 0, (Lwork + 1) * sizeof(dtype)); const int lin2 = __LINE__;
switch (err2) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case cudaErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
case cudaErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin2);
exit(err2);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, err2);
exit(err2);
}
}
return (omp_get_wtime() - go);
}
static double alloc_cpu_mtx()
{
const double go = omp_get_wtime();
const size_t size = size_t(lda) * Nmax * sizeof(dtype);
const cudaError_t error = cudaMallocHost(&Acpu, size); const int lin = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case cudaErrorMemoryAllocation:
(void)fprintf(stderr, "[%s@%s:%d] MemoryAllocation\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
const double end = (omp_get_wtime() - go);
// don't time clearing the memory
(void)memset(Acpu, 0, size);
return end;
}
static double init_cpu_mtx()
{
const double go = omp_get_wtime();
static const int idist = 1;
int iseed[4] = { 0, 1, 2, 3 };
const int k = Nmax - 1;
int info = 0;
dtype *const wrk = (dtype*)calloc(3 * Nmax, sizeof(dtype)); const int lin1 = __LINE__;
if (!wrk) {
(void)fprintf(stderr, "[%s@%s:%d,%d] ", __FUNCTION__, __FILE__, lin1, errno);
perror("calloc");
exit(errno);
}
#ifdef USE_COMPLEX
// Diagonal
REAL_LAPACK(larnv)(&idist, iseed, &Nmax, (btype*)wrk);
// Acpu
CMPLX_LAPACK(laghe)(&Nmax, &k, (btype*)wrk, (MKL_Complex*)Acpu, &lda, iseed, (MKL_Complex*)(wrk + Nmax), &info); const int lin2 = __LINE__;
#else // USE_REAL
// Diagonal
REAL_LAPACK(larnv)(&idist, iseed, &Nmax, wrk);
// Acpu
REAL_LAPACK(lagsy)(&Nmax, &k, wrk, Acpu, &lda, iseed, wrk + Nmax, &info); const int lin2 = __LINE__;
#endif // USE_COMPLEX
if (info) {
(void)fprintf(stderr, "[%s@%s:%d] INFO = %d\n", __FUNCTION__, __FILE__, lin2, info);
exit(info);
}
free(wrk);
return (omp_get_wtime() - go);
}
static double copy_mtx_cpu2gpu(const int n)
{
const double go = omp_get_wtime();
if (n >= Nmin) {
if (n > Nmax) {
(void)fprintf(stderr, "[%s@%s] n == %d > Nmax == %d\n", __FUNCTION__, __FILE__, n, Nmax);
exit(n);
}
const size_t pitch = lda * sizeof(dtype);
const cudaError_t error = cudaMemcpy2D(Agpu, pitch, Acpu, pitch, n * sizeof(dtype), n, cudaMemcpyHostToDevice); const int lin1 = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin1);
#endif // !NDEBUG
break;
case cudaErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin1);
exit(error);
case cudaErrorInvalidPitchValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidPitchValue\n", __FUNCTION__, __FILE__, lin1);
exit(error);
case cudaErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin1);
exit(error);
case cudaErrorInvalidMemcpyDirection:
(void)fprintf(stderr, "[%s@%s:%d] InvalidMemcpyDirection\n", __FUNCTION__, __FILE__, lin1);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, error);
exit(error);
}
// just to be sure...
(void)cudaDeviceSynchronize();
}
else {
(void)fprintf(stderr, "[%s@%s] n == %d < Nmin == %d\n", __FUNCTION__, __FILE__, n, Nmin);
exit(n);
}
return (omp_get_wtime() - go);
}
static double potrf_gpu(const bool upper, const int n)
{
const double go = omp_get_wtime();
if (n >= Nmin) {
if (n > Nmax) {
(void)fprintf(stderr, "[%s@%s] n == %d > Nmax == %d\n", __FUNCTION__, __FILE__, n, Nmax);
exit(n);
}
const cublasFillMode_t uplo = (upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER);
const cusolverStatus_t status = cusolverDnXpotrf(handle, uplo, n, Agpu, lda, Workspace, Lwork, (int*)(Workspace + Lwork)); const int lin1 = __LINE__;
int devInfo = 0;
(void)cudaDeviceSynchronize();
const cudaError_t error = cudaMemcpy(&devInfo, Workspace + Lwork, sizeof(int), cudaMemcpyDeviceToHost); const int lin2 = __LINE__;
(void)cudaDeviceSynchronize();
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin2);
#endif // !NDEBUG
break;
case cudaErrorInvalidValue:
(void)fprintf(stderr, "[%s@%s:%d] InvalidValue\n", __FUNCTION__, __FILE__, lin2);
exit(error);
case cudaErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin2);
exit(error);
case cudaErrorInvalidMemcpyDirection:
(void)fprintf(stderr, "[%s@%s:%d] InvalidMemcpyDirection\n", __FUNCTION__, __FILE__, lin2);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin2, error);
exit(error);
}
switch (status) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d,%d] SUCCESS\n", __FUNCTION__, __FILE__, lin1, devInfo);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d,%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
case CUSOLVER_STATUS_INVALID_VALUE:
(void)fprintf(stderr, "[%s@%s:%d,%d] INVALID_VALUE\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
case CUSOLVER_STATUS_ARCH_MISMATCH:
(void)fprintf(stderr, "[%s@%s:%d,%d] ARCH_MISMATCH\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
case CUSOLVER_STATUS_INTERNAL_ERROR:
(void)fprintf(stderr, "[%s@%s:%d,%d] INTERNAL_ERROR\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(status);
default:
(void)fprintf(stderr, "[%s@%s:%d,%d] unknown error %d\n", __FUNCTION__, __FILE__, lin1, devInfo, status);
exit(status);
}
if (devInfo) {
(void)fprintf(stderr, "[%s@%s:%d] INFO = %d\n", __FUNCTION__, __FILE__, lin1, devInfo);
exit(devInfo);
}
}
else {
(void)fprintf(stderr, "[%s@%s] n == %d < Nmin == %d\n", __FUNCTION__, __FILE__, n, Nmin);
exit(n);
}
return (omp_get_wtime() - go);
}
static double free_cpu_mtx()
{
const double go = omp_get_wtime();
if (Acpu) {
const cudaError_t error = cudaFreeHost(Acpu); const int lin = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case cudaErrorInitializationError:
(void)fprintf(stderr, "[%s@%s:%d] InitializationError\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
Acpu = (dtype*)NULL;
}
return (omp_get_wtime() - go);
}
static double free_gpu_wrk()
{
const double go = omp_get_wtime();
if (Workspace) {
const cudaError_t error = cudaFree(Workspace); const int lin = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case cudaErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin);
exit(error);
case cudaErrorInitializationError:
(void)fprintf(stderr, "[%s@%s:%d] InitializationError\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
Workspace = (dtype*)NULL;
}
return (omp_get_wtime() - go);
}
static double free_gpu_mtx()
{
const double go = omp_get_wtime();
const cudaError_t error = cudaFree(Agpu); const int lin = __LINE__;
switch (error) {
case cudaSuccess:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] Success\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case cudaErrorInvalidDevicePointer:
(void)fprintf(stderr, "[%s@%s:%d] InvalidDevicePointer\n", __FUNCTION__, __FILE__, lin);
exit(error);
case cudaErrorInitializationError:
(void)fprintf(stderr, "[%s@%s:%d] InitializationError\n", __FUNCTION__, __FILE__, lin);
exit(error);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, error);
exit(error);
}
Agpu = (dtype*)NULL;
return (omp_get_wtime() - go);
}
static double destroy_handle()
{
const double go = omp_get_wtime();
const cusolverStatus_t status = cusolverDnDestroy(handle); const int lin = __LINE__;
switch (status) {
case CUSOLVER_STATUS_SUCCESS:
#ifndef NDEBUG
(void)fprintf(stdout, "[%s@%s:%d] SUCCESS\n", __FUNCTION__, __FILE__, lin);
#endif // !NDEBUG
break;
case CUSOLVER_STATUS_NOT_INITIALIZED:
(void)fprintf(stderr, "[%s@%s:%d] NOT_INITIALIZED\n", __FUNCTION__, __FILE__, lin);
exit(status);
default:
(void)fprintf(stderr, "[%s@%s:%d] unknown error %d\n", __FUNCTION__, __FILE__, lin, status);
exit(status);
}
return (omp_get_wtime() - go);
}
int main(int argc, char* argv[])
{
if ((argc < 5) || (argc > 6)) {
(void)fprintf(stderr, "%s Nmin Nmax Nstep #samples [device#]\n", *argv);
return EXIT_FAILURE;
}
if ((Nmin = atoi(argv[1])) <= 0) {
(void)fprintf(stderr, "Nmin == %d <= 0\n", Nmin);
return EXIT_FAILURE;
}
if ((Nmax = atoi(argv[2])) <= 0) {
(void)fprintf(stderr, "Nmax == %d <= 0\n", Nmax);
return EXIT_FAILURE;
}
if (Nmax < Nmin) {
(void)fprintf(stderr, "Nmax == %d < Nmin == %d\n", Nmax, Nmin);
return EXIT_FAILURE;
}
if ((Nstep = atoi(argv[3])) <= 0) {
(void)fprintf(stderr, "Nstep == %d <= 0\n", Nstep);
return EXIT_FAILURE;
}
if ((_samples = atoi(argv[4])) <= 0) {
(void)fprintf(stderr, "#samples == %d <= 0\n", _samples);
return EXIT_FAILURE;
}
if ((argc == 6) && ((device_ = atoi(argv[5])) < 0)) {
(void)fprintf(stderr, "device# == %d < 0\n", device_);
return EXIT_FAILURE;
}
const char *const env_nthr = getenv("MKL_NUM_THREADS");
if (!env_nthr) {
(void)fprintf(stderr, "MKL_NUM_THREADS environment variable not set\n");
return EXIT_FAILURE;
}
const int mkl_nthr = atoi(env_nthr);
if (mkl_nthr <= 0) {
(void)fprintf(stderr, "MKL_NUM_THREADS = %d <= 0\n", mkl_nthr);
return EXIT_FAILURE;
}
const double resol = omp_get_wtick();
#ifndef NDEBUG
(void)fprintf(stdout, "[omp_get_wtick] %#.17E s\n", resol);
#endif // !NDEBUG
(void)device_count();
  if (device_ >= _devices) {
    (void)fprintf(stderr, "device# == %d >= #devices == %d\n", device_, _devices);
return EXIT_FAILURE;
}
(void)set_device();
(void)create_handle();
const double agpu_time = alloc_gpu_mtx();
const double lwrk_time = find_lwork();
double awrk_time = alloc_gpu_wrk();
// number of tests
int ntst = (Nmax - Nmin) + 1;
// at least one test
ntst = ((ntst < Nstep) ? 1 : (ntst / Nstep));
// add the average time to find Lwork
awrk_time += lwrk_time / (2 * ntst);
const double acpu_time = alloc_cpu_mtx();
const double init_time = init_cpu_mtx();
#ifndef NDEBUG
(void)fprintf(stdout, "[init_cpu_mtx] %#.17E s\n", init_time);
#endif // !NDEBUG
(void)fprintf(stdout, "\"N\",\"COPY_H2D_MIN_s\",\"COPY_H2D_AVG_s\",\"COPY_H2D_MAX_s\",\"LPOTRF_MIN_s\",\"LPOTRF_AVG_s\",\"LPOTRF_MAX_s\",\"UPOTRF_MIN_s\",\"UPOTRF_AVG_s\",\"UPOTRF_MAX_s\"\n");
(void)fflush(stdout);
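  // Benchmark loop: for every problem size n, each sample re-uploads the matrix and factorizes it
  // once with the lower and once with the upper triangle, tracking min/avg/max times per operation.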
for (int n = Nmin; n <= Nmax; n += Nstep) {
double Lcopy_times_min = INFINITY;
double Lcopy_times_max = -0.0;
double Lcopy_times_avg = -0.0;
double Ucopy_times_min = INFINITY;
double Ucopy_times_max = -0.0;
double Ucopy_times_avg = -0.0;
double Lpotrf_times_min = INFINITY;
double Lpotrf_times_max = -0.0;
double Lpotrf_times_avg = -0.0;
double Upotrf_times_min = INFINITY;
double Upotrf_times_max = -0.0;
double Upotrf_times_avg = -0.0;
for (int sample = 0; sample < _samples; ++sample) {
const double Lcopy_time = copy_mtx_cpu2gpu(n);
#ifndef NDEBUG
(void)fprintf(stdout, "[copy_mtx_cpu2gpu(%d),%d,L] %#.17E s\n", n, sample, Lcopy_time);
#endif // !NDEBUG
if (Lcopy_time < Lcopy_times_min)
Lcopy_times_min = Lcopy_time;
if (Lcopy_time > Lcopy_times_max)
Lcopy_times_max = Lcopy_time;
Lcopy_times_avg += Lcopy_time / _samples;
const double Lpotrf_time = potrf_gpu(false, n);
#ifndef NDEBUG
(void)fprintf(stdout, "[potrf_gpu(%d),%d,L] %#.17E s\n", n, sample, Lpotrf_time);
#endif // !NDEBUG
if (Lpotrf_time < Lpotrf_times_min)
Lpotrf_times_min = Lpotrf_time;
if (Lpotrf_time > Lpotrf_times_max)
Lpotrf_times_max = Lpotrf_time;
Lpotrf_times_avg += Lpotrf_time / _samples;
const double Ucopy_time = copy_mtx_cpu2gpu(n);
#ifndef NDEBUG
(void)fprintf(stdout, "[copy_mtx_cpu2gpu(%d),%d,U] %#.17E s\n", n, sample, Ucopy_time);
#endif // !NDEBUG
if (Ucopy_time < Ucopy_times_min)
Ucopy_times_min = Ucopy_time;
if (Ucopy_time > Ucopy_times_max)
Ucopy_times_max = Ucopy_time;
Ucopy_times_avg += Ucopy_time / _samples;
const double Upotrf_time = potrf_gpu(true, n);
#ifndef NDEBUG
(void)fprintf(stdout, "[potrf_gpu(%d),%d,U] %#.17E s\n", n, sample, Upotrf_time);
#endif // !NDEBUG
if (Upotrf_time < Upotrf_times_min)
Upotrf_times_min = Upotrf_time;
if (Upotrf_time > Upotrf_times_max)
Upotrf_times_max = Upotrf_time;
Upotrf_times_avg += Upotrf_time / _samples;
}
const double copy_times_min = ((Lcopy_times_min <= Ucopy_times_min) ? Lcopy_times_min : Ucopy_times_min);
const double copy_times_max = ((Lcopy_times_max >= Ucopy_times_max) ? Lcopy_times_max : Ucopy_times_max);
const double copy_times_avg = (Lcopy_times_avg + Ucopy_times_avg) / 2;
(void)fprintf(stdout, lin_fmt, n,
copy_times_min, copy_times_avg, copy_times_max,
Lpotrf_times_min, Lpotrf_times_avg, Lpotrf_times_max,
Upotrf_times_min, Upotrf_times_avg, Upotrf_times_max);
(void)fflush(stdout);
}
const double fcpu_time = free_cpu_mtx();
  const double fwrk_time = free_gpu_wrk();
  const double fgpu_time = free_gpu_mtx();
(void)destroy_handle();
(void)fprintf(stdout, lin_fmt, -_samples,
resol, double(mkl_nthr), init_time,
acpu_time, agpu_time, awrk_time,
fcpu_time, fgpu_time, fwrk_time);
(void)fflush(stdout);
return EXIT_SUCCESS;
}
|
a296571f3facb19d0f1451e5eac5280568ac99b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
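// Forma-generated kernel: four applications of the gradient stencil
// (x + 1/sqrt(eps + squared differences with the four neighbours), eps = 1e-4) are fused into one pass.
// Each block streams down the rows, holding one row per stage in shared memory (__tilevar_2__..__tilevar_5__)
// and the rows above/below in registers (b*/t*); the 8-column block overlap provides the halo.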
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_4__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_5__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0);
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__];
t2 = input[__iter_3__+M*(__iter_y__+1)];
}
// Initial computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
}
// Rest of the computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
double __temp_1__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_5__[__iter_3__-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
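// Host wrapper: allocates the device input/output buffers (accepting either host or device
// pointers for h_input), launches the fused stencil kernel 1000 times while sampling board
// power, and copies the result back into __var_0__.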
extern "C" void gradient (double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, __size_1___kernel___forma_kernel__0__/64);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
  // hipify does not translate the NVML power-measurement calls; the block below assumes the
  // ROCm SMI API from <rocm_smi/rocm_smi.h> as the AMD-side equivalent
  // (rsmi_init / rsmi_dev_power_ave_get / rsmi_shut_down).
  uint64_t power1, power2; // average board power in microwatts
  rsmi_status_t result;
  uint32_t device = 0;
  result = rsmi_init(0);
  assert(RSMI_STATUS_SUCCESS == result);
  // rocm_smi has no direct counterpart of nvmlDeviceGetPowerManagementMode, so that query is omitted
  result = rsmi_dev_power_ave_get(device, 0, &power1);
  assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<1000; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_1___kernel___forma_kernel__0__/64, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
hipDeviceSynchronize();
  result = rsmi_dev_power_ave_get(device, 0, &power2);
  assert(RSMI_STATUS_SUCCESS == result);
  power2 -= power1;
  printf("%llu\n", (unsigned long long)power2);
  rsmi_shut_down();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| a296571f3facb19d0f1451e5eac5280568ac99b7.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
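// Forma-generated kernel: four applications of the gradient stencil
// (x + 1/sqrt(eps + squared differences with the four neighbours), eps = 1e-4) are fused into one pass.
// Each block streams down the rows, holding one row per stage in shared memory (__tilevar_2__..__tilevar_5__)
// and the rows above/below in registers (b*/t*); the 8-column block overlap provides the halo.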
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, double * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
double * __tilevar_2__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_3__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_4__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double * __tilevar_5__ = (double*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(double)*(FORMA_BLOCKDIM_X);
double t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
double b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0);
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__];
t2 = input[__iter_3__+M*(__iter_y__+1)];
}
// Initial computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
}
// Rest of the computation
for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
double __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
double __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
double __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
double __temp_1__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
double __temp_2__ = (__temp_1__ * __temp_1__);
double __temp_3__ = (0.000100f + __temp_2__);
double __temp_5__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
double __temp_6__ = (__temp_5__ * __temp_5__);
double __temp_7__ = (__temp_3__ + __temp_6__);
double __temp_9__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
double __temp_10__ = (__temp_9__ * __temp_9__);
double __temp_11__ = (__temp_7__ + __temp_10__);
double __temp_13__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
double __temp_14__ = (__temp_13__ * __temp_13__);
double __temp_15__ = (__temp_11__ + __temp_14__);
double __temp_16__ = sqrt(__temp_15__);
double __temp_17__ = (1.000000f / __temp_16__);
double __temp_18__ = (__tilevar_5__[__iter_3__-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
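/* The kernel pipelines each column through four shared-memory line buffers
   (__tilevar_2__ .. __tilevar_5__), each holding FORMA_BLOCKDIM_X doubles, which is
   why the helper below reserves 4 * blockDim.x doubles of dynamic shared memory. */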
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(double)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void gradient (double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((N)*(M)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
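	/* Each pipeline stage trims one column on each side (the index guards in the kernel
	   shrink by one per stage), so adjacent blocks overlap by 8 columns and the x-grid
	   divides M by blockDim.x - 8. The y-grid launches roughly 64 blocks, each sweeping
	   about N/64 rows (the same N/64 value is passed to the kernel as its row-tile height). */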
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, __size_1___kernel___forma_kernel__0__/64);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
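	/* Power measurement: read the GPU power draw via NVML (milliwatts) before and after
	   1000 back-to-back kernel launches and print the difference. */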
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<1000; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_1___kernel___forma_kernel__0__/64, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
71ce209417498d943f9e4b32a6c165d02419e85a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
struct GlobalConstants {
float3* points;
int numPoints;
int k;
uint* clusters;
};
__constant__ GlobalConstants params;
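// k-means over 3-D points: each thread block clusters its own slice of 1024 points into
// k groups, alternating the assignment step (nearest center by Euclidean distance) and
// the update step (recompute each center as the mean of its points) until no assignment
// changes within the block.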
// Assumed helper (defined elsewhere) that seeds the k initial cluster centers.
__device__ void randomAssignment(float3* centers);
__global__ void kernelComputeCluster(int numPoints, int k) {
  int modPoint = threadIdx.x;                   // index of this point within the block
  int point = blockIdx.x * 1024 + threadIdx.x;  // global point index (assumes numPoints is a multiple of 1024)
  float x = params.points[point].x;
  float y = params.points[point].y;
  float z = params.points[point].z;
  __shared__ uint prevCluster[1024];
  __shared__ uint cluster[1024];
  __shared__ float3 centers[32];                // assumes k <= 32: shared arrays need a compile-time bound
  __shared__ float totals[32];
  __shared__ int cont;                          // shared variables cannot carry initializers
  if (modPoint == 0) cont = 1;
  prevCluster[modPoint] = (uint)-1;             // force at least one iteration
  randomAssignment(centers);
  __syncthreads();
  while (cont) {
    __syncthreads();
    if (modPoint == 0) cont = 0;
    __syncthreads();
    // Assignment step: pick the nearest center by squared Euclidean distance
    // (^ is XOR in C, so the squares are written as products).
    uint best = 0;
    float bestval = (x - centers[0].x) * (x - centers[0].x)
                  + (y - centers[0].y) * (y - centers[0].y)
                  + (z - centers[0].z) * (z - centers[0].z);
    for (int i = 1; i < k; i++) {
      float temp = (x - centers[i].x) * (x - centers[i].x)
                 + (y - centers[i].y) * (y - centers[i].y)
                 + (z - centers[i].z) * (z - centers[i].z);
      if (temp < bestval) {
        best = i;
        bestval = temp;
      }
    }
    cluster[modPoint] = best;
    if (prevCluster[modPoint] != cluster[modPoint]) {
      cont = 1;                                 // benign race: every writer stores 1
    }
    __syncthreads();
    // Update step: one thread per center coordinate (modPoint/3 = center, modPoint%3 = x/y/z)
    // recomputes that coordinate as the mean of this block's points assigned to the center.
    if (modPoint < 3 * k) {
      int c = modPoint / 3;
      int d = modPoint % 3;
      float sum = 0.0f;
      float count = 0.0f;
      for (int i = 0; i < 1024; i++) {
        if (cluster[i] == c) {
          sum += ((float*)params.points)[(blockIdx.x * 1024 + i) * 3 + d];
          count += 1.0f;
        }
      }
      if (d == 0) totals[c] = count;
      if (count > 0.0f) ((float*)centers)[modPoint] = sum / count;
    }
    prevCluster[modPoint] = cluster[modPoint];
    __syncthreads();
  }
  params.clusters[point] = cluster[modPoint];
}
// `kmeans` is assumed to be a class declared in a header elsewhere; the stray `void`
// return type is dropped (constructors have none). A __constant__ symbol such as
// `params` cannot be read from host code, so `hostParams` is a hypothetical host-side
// copy of the same constants, assumed to be filled before this constructor runs.
extern GlobalConstants hostParams;
kmeans::kmeans() {
  dim3 blockDim(1024);
  dim3 gridDim((hostParams.numPoints + 1023) / 1024);
  hipLaunchKernelGGL(( kernelComputeCluster), dim3(gridDim), dim3(blockDim), 0, 0, hostParams.numPoints, hostParams.k);
  hipDeviceSynchronize();
}
| 71ce209417498d943f9e4b32a6c165d02419e85a.cu | struct GlobalConstants {
float3* points;
int numPoints;
int k;
uint* clusters;
};
__constant__ GlobalConstants params;
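// k-means over 3-D points: each thread block clusters its own slice of 1024 points into
// k groups, alternating the assignment step (nearest center by Euclidean distance) and
// the update step (recompute each center as the mean of its points) until no assignment
// changes within the block.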
// Assumed helper (defined elsewhere) that seeds the k initial cluster centers.
__device__ void randomAssignment(float3* centers);
__global__ void kernelComputeCluster(int numPoints, int k) {
  int modPoint = threadIdx.x;                   // index of this point within the block
  int point = blockIdx.x * 1024 + threadIdx.x;  // global point index (assumes numPoints is a multiple of 1024)
  float x = params.points[point].x;
  float y = params.points[point].y;
  float z = params.points[point].z;
  __shared__ uint prevCluster[1024];
  __shared__ uint cluster[1024];
  __shared__ float3 centers[32];                // assumes k <= 32: shared arrays need a compile-time bound
  __shared__ float totals[32];
  __shared__ int cont;                          // shared variables cannot carry initializers
  if (modPoint == 0) cont = 1;
  prevCluster[modPoint] = (uint)-1;             // force at least one iteration
  randomAssignment(centers);
  __syncthreads();
  while (cont) {
    __syncthreads();
    if (modPoint == 0) cont = 0;
    __syncthreads();
    // Assignment step: pick the nearest center by squared Euclidean distance
    // (^ is XOR in C, so the squares are written as products).
    uint best = 0;
    float bestval = (x - centers[0].x) * (x - centers[0].x)
                  + (y - centers[0].y) * (y - centers[0].y)
                  + (z - centers[0].z) * (z - centers[0].z);
    for (int i = 1; i < k; i++) {
      float temp = (x - centers[i].x) * (x - centers[i].x)
                 + (y - centers[i].y) * (y - centers[i].y)
                 + (z - centers[i].z) * (z - centers[i].z);
      if (temp < bestval) {
        best = i;
        bestval = temp;
      }
    }
    cluster[modPoint] = best;
    if (prevCluster[modPoint] != cluster[modPoint]) {
      cont = 1;                                 // benign race: every writer stores 1
    }
    __syncthreads();
    // Update step: one thread per center coordinate (modPoint/3 = center, modPoint%3 = x/y/z)
    // recomputes that coordinate as the mean of this block's points assigned to the center.
    if (modPoint < 3 * k) {
      int c = modPoint / 3;
      int d = modPoint % 3;
      float sum = 0.0f;
      float count = 0.0f;
      for (int i = 0; i < 1024; i++) {
        if (cluster[i] == c) {
          sum += ((float*)params.points)[(blockIdx.x * 1024 + i) * 3 + d];
          count += 1.0f;
        }
      }
      if (d == 0) totals[c] = count;
      if (count > 0.0f) ((float*)centers)[modPoint] = sum / count;
    }
    prevCluster[modPoint] = cluster[modPoint];
    __syncthreads();
  }
  params.clusters[point] = cluster[modPoint];
}
// `kmeans` is assumed to be a class declared in a header elsewhere; the stray `void`
// return type is dropped (constructors have none). A __constant__ symbol such as
// `params` cannot be read from host code, so `hostParams` is a hypothetical host-side
// copy of the same constants, assumed to be filled before this constructor runs.
extern GlobalConstants hostParams;
kmeans::kmeans() {
  dim3 blockDim(1024);
  dim3 gridDim((hostParams.numPoints + 1023) / 1024);
  kernelComputeCluster<<<gridDim, blockDim>>>(hostParams.numPoints, hostParams.k);
  cudaDeviceSynchronize();
}
|
70585a3fd76e8e5e3e4838003911e6bae1cd485f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void sgemm_nn_vec_32x128(
float* param_C,
const float* param_A,
const float* param_B,
float param_alpha,
float param_beta,
int param_flags,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
) {
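  // Shared-memory footprint of a 32x128 NN SGEMM tile: presumably two buffers for the
  // 32x16 A tile (plus 32 floats of padding) and two for the 128x16 B tile, plus 4 words
  // of bookkeeping. This file is only a stub; the single store below references the
  // buffer so it is not optimized away.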
__shared__ float share[(32*16 + 32)*2 + (128*16 + 0)*2 + 4];
*param_C = share[0];
}
| 70585a3fd76e8e5e3e4838003911e6bae1cd485f.cu |
extern "C" __global__ void sgemm_nn_vec_32x128(
float* param_C,
const float* param_A,
const float* param_B,
float param_alpha,
float param_beta,
int param_flags,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
) {
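  // Shared-memory footprint of a 32x128 NN SGEMM tile: presumably two buffers for the
  // 32x16 A tile (plus 32 floats of padding) and two for the 128x16 B tile, plus 4 words
  // of bookkeeping. This file is only a stub; the single store below references the
  // buffer so it is not optimized away.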
__shared__ float share[(32*16 + 32)*2 + (128*16 + 0)*2 + 4];
*param_C = share[0];
}
|