hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M)
---|---|---|---|
00b1523a92ace2c1463aa8d45f9a5e2bb3363959.hip
|
// !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=256 --gridDim=2 --no-inline
//Write by thread [\d]+ in block [\d]+, .+kernel\.cu:9:21:
#include <hip/hip_runtime.h>
__global__ void curand_test(hiprandStateMtgp32_t *state, float *A) {
if (threadIdx.x == 0) {
A[blockIdx.x] = hiprand(state);
}
}
|
00b1523a92ace2c1463aa8d45f9a5e2bb3363959.cu
|
//xfail:BOOGIE_ERROR
//--blockDim=256 --gridDim=2 --no-inline
//Write by thread [\d]+ in block [\d]+, .+kernel\.cu:9:21:
#include <cuda.h>
__global__ void curand_test(curandStateMtgp32_t *state, float *A) {
if (threadIdx.x == 0) {
A[blockIdx.x] = curand(state);
}
}
|
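The pair above is a minimal GPUVerify-style test: hipify rewrites `#include <cuda.h>` to `#include <hip/hip_runtime.h>`, `curandStateMtgp32_t` to `hiprandStateMtgp32_t`, and `curand(state)` to `hiprand(state)`, while the comments and the `//--blockDim=256 --gridDim=2` verifier directives pass through unchanged. The fragment ships without host code; the sketch below is an illustrative CUDA driver, not part of the dataset row. It follows the MTGP32 setup shown in the cuRAND device-API documentation and, unlike the verifier test, has every thread in the block call the generator (MTGP32 updates shared state block-wide). All host-side names are invented for the example and error checking is omitted.

```cuda
#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>            // curandStateMtgp32_t, device-side curand()
#include <curand_mtgp32_host.h>       // curandMakeMTGP32Constants / KernelState
#include <curand_mtgp32dc_p_11213.h>  // pre-computed MTGP32 parameter table

__global__ void mtgp32_demo(curandStateMtgp32_t *state, float *A) {
    // All threads in the block draw together; MTGP32 synchronizes internally.
    unsigned int r = curand(&state[blockIdx.x]);
    if (threadIdx.x == 0) {
        A[blockIdx.x] = (float)r;  // keep one sample per block, as in the test above
    }
}

int main() {
    const int nBlocks = 2, nThreads = 256;  // mirrors --gridDim=2 --blockDim=256

    curandStateMtgp32_t *devStates;
    mtgp32_kernel_params *devKernelParams;
    float *devA;
    cudaMalloc(&devStates, nBlocks * sizeof(curandStateMtgp32_t));
    cudaMalloc(&devKernelParams, sizeof(mtgp32_kernel_params));
    cudaMalloc(&devA, nBlocks * sizeof(float));

    // Load the pre-computed parameter set and seed one generator state per block.
    curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devKernelParams);
    curandMakeMTGP32KernelState(devStates, mtgp32dc_params_fast_11213,
                                devKernelParams, nBlocks, 1234ULL);

    mtgp32_demo<<<nBlocks, nThreads>>>(devStates, devA);

    float hostA[nBlocks];
    cudaMemcpy(hostA, devA, sizeof(hostA), cudaMemcpyDeviceToHost);
    printf("per-block samples: %f %f\n", hostA[0], hostA[1]);

    cudaFree(devA);
    cudaFree(devKernelParams);
    cudaFree(devStates);
    return 0;
}
```

hipify would translate a driver like this just as mechanically; hipRAND ships analogous MTGP32 host helpers, so the structure carries over to the HIP side.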
eac2c36cdaf394f7522526aacf01076c9d334f2e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include <glm/gtc/matrix_inverse.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
struct is_path_complete {
__host__ __device__ bool operator()(const PathSegment & pathSegment) {
return pathSegment.remainingBounces > 0;
}
};
struct compare_materials {
__host__ __device__ bool operator()(const ShadeableIntersection & shadeableIntersection1, const ShadeableIntersection & shadeableIntersection2) {
return shadeableIntersection1.materialId < shadeableIntersection2.materialId;
}
};
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Light * dev_lights = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static float3 * dev_albedos = NULL;
static float3 * dev_normals = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections_cache = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
clock_t timer, blurTimer;
double iteration_time = 0;
double total_time = 0;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
hipMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection));
hipMalloc(&dev_lights, scene->lights.size() * sizeof(Light));
hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Light), hipMemcpyHostToDevice);
hipMalloc(&dev_albedos, pixelcount * sizeof(float3));
hipMalloc(&dev_normals, pixelcount * sizeof(float3));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// clean up the extra device buffers allocated in pathtraceInit
hipFree(dev_intersections_cache);
hipFree(dev_lights);
hipFree(dev_albedos);
hipFree(dev_normals);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
glm::vec2 jitter = glm::vec2(0.0f, 0.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
if (ANTI_ALIASING) {
jitter = glm::vec2(u01(rng) - 0.5f, u01(rng) - 0.5f);
}
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x + jitter.x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y + jitter.y - (float)cam.resolution.y * 0.5f)
);
if (DEPTH_OF_FIELD) {
glm::vec3 focus = segment.ray.origin + (cam.focalLength) * segment.ray.direction;
segment.ray.origin.x += max(cam.radius, 0.0f) * (u01(rng) - 0.5f);
segment.ray.origin.y += max(cam.radius, 0.0f) * (u01(rng) - 0.5f);
segment.ray.direction = glm::normalize(focus - segment.ray.origin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
, float3 *dev_albedos
, float3 *dev_normals
, int iter
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
if (DENOISE && iter == 0) {
dev_normals[path_index].x = (intersections[path_index].t < 0.0) ? 0.0f : intersections[path_index].surfaceNormal.x;
dev_normals[path_index].y = (intersections[path_index].t < 0.0) ? 0.0f : intersections[path_index].surfaceNormal.y;
dev_normals[path_index].z = (intersections[path_index].t < 0.0) ? 0.0f : intersections[path_index].surfaceNormal.z;
dev_albedos[path_index].x = (intersections[path_index].t < 0.0) ? 0.0f : pathSegment.color.x;
dev_albedos[path_index].y = (intersections[path_index].t < 0.0) ? 0.0f : pathSegment.color.y;
dev_albedos[path_index].z = (intersections[path_index].t < 0.0) ? 0.0f : pathSegment.color.z;
}
}
}
__global__ void computeNewLocations(
Geom * geoms
, int geoms_size
, float dt
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= geoms_size) {
return;
}
geoms[idx].translation = geoms[idx].translation + geoms[idx].velocity * dt;
glm::mat4 translationMat = glm::translate(glm::mat4(), geoms[idx].translation);
glm::mat4 rotationMat = glm::rotate(glm::mat4(), geoms[idx].rotation.x * (float)PI / 180, glm::vec3(1, 0, 0));
rotationMat = rotationMat * glm::rotate(glm::mat4(), geoms[idx].rotation.y * (float)PI / 180, glm::vec3(0, 1, 0));
rotationMat = rotationMat * glm::rotate(glm::mat4(), geoms[idx].rotation.z * (float)PI / 180, glm::vec3(0, 0, 1));
glm::mat4 scaleMat = glm::scale(glm::mat4(), geoms[idx].scale);
geoms[idx].transform = translationMat * rotationMat * scaleMat;
geoms[idx].inverseTransform = glm::inverse(geoms[idx].transform);
geoms[idx].invTranspose = glm::inverseTranspose(geoms[idx].transform);
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int depth
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int num_lights
, Light * lights
, int num_geoms
, Geom * geoms
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
// Check remainingBounces only after the bounds test: the launch is rounded up
// to whole blocks, so idx can exceed num_paths.
if (pathSegments[idx].remainingBounces <= 0) {
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance * glm::abs(glm::dot(pathSegments[idx].ray.direction, intersection.surfaceNormal)));
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
if (pathSegments[idx].remainingBounces > 0) {
scatterRay(pathSegments[idx], getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, material, rng);
if (pathSegments[idx].remainingBounces == 0 && DIRECT_LIGHTING) {
directLight(num_lights, lights, num_geoms, geoms, materials, getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, pathSegments[idx], rng);
}
//pathSegments[idx].color = glm::clamp(pathSegments[idx].color, glm::vec3(0.0f), glm::vec3(1.0f));
}
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter, float3* albedos, float3* normals) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
if (TIMER) {
timer = clock();
}
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
if (iter > 0 && MOTION_BLUR) {
computeNewLocations << <numblocksPathSegmentTracing, blockSize1d >> > (
dev_geoms,
hst_scene->geoms.size(),
((double)(clock() - blurTimer)) / CLOCKS_PER_SEC);
}
blurTimer = clock();
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
if (depth == 0 && CACHE_FIRST_BOUNCE && !ANTI_ALIASING && !MOTION_BLUR) {
// tracing
if (iter == 1) {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections_cache
, dev_albedos
, dev_normals
, iter
);
checkCUDAError("trace one bounce");
}
hipMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
else {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, dev_albedos
, dev_normals
, iter
);
checkCUDAError("trace one bounce");
}
hipDeviceSynchronize();
depth++;
if (SORT_BY_MATERIAL && num_paths > 0) {
thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials());
}
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
depth,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
hst_scene->lights.size(),
dev_lights,
hst_scene->geoms.size(),
dev_geoms
);
if (STREAM_COMPACT) {
dev_path_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, is_path_complete());
num_paths = dev_path_end - dev_paths;
}
iterationComplete = num_paths == 0 || depth > traceDepth; // TODO: should be based off stream compaction results.
}
if (TIMER) {
timer = clock() - timer;
iteration_time = ((double)timer) / CLOCKS_PER_SEC;
total_time += iteration_time;
}
num_paths = pixelcount;
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
// Retrieve albedo buffer from GPU
hipMemcpy(albedos, dev_albedos,
pixelcount * sizeof(float3), hipMemcpyDeviceToHost);
// Retrieve normal buffer from GPU
hipMemcpy(normals, dev_normals,
pixelcount * sizeof(float3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
if (TIMER) {
printf("(Time Taken so Far, time for this iteration) : (%f, %f) \n", total_time, iteration_time);
}
}
|
eac2c36cdaf394f7522526aacf01076c9d334f2e.cu
|
#include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include <glm/gtc/matrix_inverse.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
struct is_path_complete {
__host__ __device__ bool operator()(const PathSegment & pathSegment) {
return pathSegment.remainingBounces > 0;
}
};
struct compare_materials {
__host__ __device__ bool operator()(const ShadeableIntersection & shadeableIntersection1, const ShadeableIntersection & shadeableIntersection2) {
return shadeableIntersection1.materialId < shadeableIntersection2.materialId;
}
};
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Light * dev_lights = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static float3 * dev_albedos = NULL;
static float3 * dev_normals = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections_cache = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
clock_t timer, blurTimer;
double iteration_time = 0;
double total_time = 0;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
cudaMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection));
cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Light));
cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Light), cudaMemcpyHostToDevice);
cudaMalloc(&dev_albedos, pixelcount * sizeof(float3));
cudaMalloc(&dev_normals, pixelcount * sizeof(float3));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// clean up the extra device buffers allocated in pathtraceInit
cudaFree(dev_intersections_cache);
cudaFree(dev_lights);
cudaFree(dev_albedos);
cudaFree(dev_normals);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
glm::vec2 jitter = glm::vec2(0.0f, 0.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
if (ANTI_ALIASING) {
jitter = glm::vec2(u01(rng) - 0.5f, u01(rng) - 0.5f);
}
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x + jitter.x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y + jitter.y - (float)cam.resolution.y * 0.5f)
);
if (DEPTH_OF_FIELD) {
glm::vec3 focus = segment.ray.origin + (cam.focalLength) * segment.ray.direction;
segment.ray.origin.x += max(cam.radius, 0.0f) * (u01(rng) - 0.5f);
segment.ray.origin.y += max(cam.radius, 0.0f) * (u01(rng) - 0.5f);
segment.ray.direction = glm::normalize(focus - segment.ray.origin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
, float3 *dev_albedos
, float3 *dev_normals
, int iter
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
if (DENOISE && iter == 0) {
dev_normals[path_index].x = (intersections[path_index].t < 0.0) ? 0.0f : intersections[path_index].surfaceNormal.x;
dev_normals[path_index].y = (intersections[path_index].t < 0.0) ? 0.0f : intersections[path_index].surfaceNormal.y;
dev_normals[path_index].z = (intersections[path_index].t < 0.0) ? 0.0f : intersections[path_index].surfaceNormal.z;
dev_albedos[path_index].x = (intersections[path_index].t < 0.0) ? 0.0f : pathSegment.color.x;
dev_albedos[path_index].y = (intersections[path_index].t < 0.0) ? 0.0f : pathSegment.color.y;
dev_albedos[path_index].z = (intersections[path_index].t < 0.0) ? 0.0f : pathSegment.color.z;
}
}
}
__global__ void computeNewLocations(
Geom * geoms
, int geoms_size
, float dt
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= geoms_size) {
return;
}
geoms[idx].translation = geoms[idx].translation + geoms[idx].velocity * dt;
glm::mat4 translationMat = glm::translate(glm::mat4(), geoms[idx].translation);
glm::mat4 rotationMat = glm::rotate(glm::mat4(), geoms[idx].rotation.x * (float)PI / 180, glm::vec3(1, 0, 0));
rotationMat = rotationMat * glm::rotate(glm::mat4(), geoms[idx].rotation.y * (float)PI / 180, glm::vec3(0, 1, 0));
rotationMat = rotationMat * glm::rotate(glm::mat4(), geoms[idx].rotation.z * (float)PI / 180, glm::vec3(0, 0, 1));
glm::mat4 scaleMat = glm::scale(glm::mat4(), geoms[idx].scale);
geoms[idx].transform = translationMat * rotationMat * scaleMat;
geoms[idx].inverseTransform = glm::inverse(geoms[idx].transform);
geoms[idx].invTranspose = glm::inverseTranspose(geoms[idx].transform);
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int depth
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
, int num_lights
, Light * lights
, int num_geoms
, Geom * geoms
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
// Check remainingBounces only after the bounds test: the launch is rounded up
// to whole blocks, so idx can exceed num_paths.
if (pathSegments[idx].remainingBounces <= 0) {
return;
}
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance * glm::abs(glm::dot(pathSegments[idx].ray.direction, intersection.surfaceNormal)));
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
if (pathSegments[idx].remainingBounces > 0) {
scatterRay(pathSegments[idx], getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, material, rng);
if (pathSegments[idx].remainingBounces == 0 && DIRECT_LIGHTING) {
directLight(num_lights, lights, num_geoms, geoms, materials, getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, pathSegments[idx], rng);
}
//pathSegments[idx].color = glm::clamp(pathSegments[idx].color, glm::vec3(0.0f), glm::vec3(1.0f));
}
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter, float3* albedos, float3* normals) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
if (TIMER) {
timer = clock();
}
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
if (iter > 0 && MOTION_BLUR) {
computeNewLocations << <numblocksPathSegmentTracing, blockSize1d >> > (
dev_geoms,
hst_scene->geoms.size(),
((double)(clock() - blurTimer)) / CLOCKS_PER_SEC);
}
blurTimer = clock();
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
if (depth == 0 && CACHE_FIRST_BOUNCE && !ANTI_ALIASING && !MOTION_BLUR) {
// tracing
if (iter == 1) {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections_cache
, dev_albedos
, dev_normals
, iter
);
checkCUDAError("trace one bounce");
}
cudaMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
else {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
, dev_albedos
, dev_normals
, iter
);
checkCUDAError("trace one bounce");
}
cudaDeviceSynchronize();
depth++;
if (SORT_BY_MATERIAL && num_paths > 0) {
thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, compare_materials());
}
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
depth,
num_paths,
dev_intersections,
dev_paths,
dev_materials,
hst_scene->lights.size(),
dev_lights,
hst_scene->geoms.size(),
dev_geoms
);
if (STREAM_COMPACT) {
dev_path_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, is_path_complete());
num_paths = dev_path_end - dev_paths;
}
iterationComplete = num_paths == 0 || depth > traceDepth; // TODO: should be based off stream compaction results.
}
if (TIMER) {
timer = clock() - timer;
iteration_time = ((double)timer) / CLOCKS_PER_SEC;
total_time += iteration_time;
}
num_paths = pixelcount;
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
// Retrieve albedo buffer from GPU
cudaMemcpy(albedos, dev_albedos,
pixelcount * sizeof(float3), cudaMemcpyDeviceToHost);
// Retrieve normal buffer from GPU
cudaMemcpy(normals, dev_normals,
pixelcount * sizeof(float3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
if (TIMER) {
printf("(Time Taken so Far, time for this iteration) : (%f, %f) \n", total_time, iteration_time);
}
}
|
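The Recap comments in the pair above describe the per-bounce loop: trace, shade, then stream-compact terminated paths so later launches cover only active segments (the `STREAM_COMPACT` branch with `thrust::partition` and the `is_path_complete` functor, which returns true while bounces remain). The sketch below is illustrative only and not part of the dataset; it uses a stripped-down `MiniPathSegment` with invented fields to show the compaction step in isolation.

```cuda
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/partition.h>

struct MiniPathSegment {
    int pixelIndex;
    int remainingBounces;
};

// True while the path still has bounces left; partition keeps these at the front.
struct is_active {
    __host__ __device__ bool operator()(const MiniPathSegment &p) const {
        return p.remainingBounces > 0;
    }
};

int main() {
    thrust::device_vector<MiniPathSegment> paths(8);
    // Mark every other path as terminated.
    for (int i = 0; i < 8; ++i) {
        MiniPathSegment p;
        p.pixelIndex = i;
        p.remainingBounces = (i % 2 == 0) ? 3 : 0;
        paths[i] = p;
    }

    MiniPathSegment *raw = thrust::raw_pointer_cast(paths.data());
    MiniPathSegment *new_end =
        thrust::partition(thrust::device, raw, raw + paths.size(), is_active());
    int num_active = static_cast<int>(new_end - raw);

    printf("active paths after compaction: %d\n", num_active);  // expect 4
    return 0;
}
```

`thrust::partition` moves the active segments to the front and returns the new end, so `new_end - raw` plays the role of the shrinking `num_paths` in the loop above.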
28627d53eefc754685305d03debf1707f8e7ab88.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ u_char clamp(float t)
{
if (t < 0) {
return 0;
} else if (t > 255){
return 255;
}
return t;
}
__global__ void kernel_colorSpaceYUV420PToRGBA(dev_t *src, dev_t *dst, int pitch_src, int pitch_dst, int w, int h)
{
unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y;
int r,g,b,y,u,v;
if (dim_x < w && dim_y < h) {
y = *((u_char*)src + dim_y * pitch_src + dim_x);
u = *((u_char*)src + (h + dim_y / 4) * pitch_src + dim_x / 2);
v = *((u_char*)src + (h * 5 + dim_y) / 4 * pitch_src + dim_x / 2);
r = clamp(y + 1.402 * (v - 128) + 0.5);
g = clamp(y - 0.34414 * (u - 128) - 0.71414 * (v - 128) + 0.5);
b = clamp(y + 1.772 * (u - 128) + 0.5);
// *((uint32_t*)dst + dim_y * pitch_dst / 4 + dim_x) = (r << 24) + (g << 16) + (b << 8);
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4) = r;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 1) = g;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 2) = b;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 3) = 255;
}
}
|
28627d53eefc754685305d03debf1707f8e7ab88.cu
|
#include "includes.h"
__device__ u_char clamp(float t)
{
if (t < 0) {
return 0;
} else if (t > 255){
return 255;
}
return t;
}
__global__ void kernel_colorSpaceYUV420PToRGBA(dev_t *src, dev_t *dst, int pitch_src, int pitch_dst, int w, int h)
{
unsigned int dim_x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int dim_y = blockDim.y * blockIdx.y + threadIdx.y;
int r,g,b,y,u,v;
if (dim_x < w && dim_y < h) {
y = *((u_char*)src + dim_y * pitch_src + dim_x);
u = *((u_char*)src + (h + dim_y / 4) * pitch_src + dim_x / 2);
v = *((u_char*)src + (h * 5 + dim_y) / 4 * pitch_src + dim_x / 2);
r = clamp(y + 1.402 * (v - 128) + 0.5);
g = clamp(y - 0.34414 * (u - 128) - 0.71414 * (v - 128) + 0.5);
b = clamp(y + 1.772 * (u - 128) + 0.5);
// *((uint32_t*)dst + dim_y * pitch_dst / 4 + dim_x) = (r << 24) + (g << 16) + (b << 8);
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4) = r;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 1) = g;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 2) = b;
*((u_char*)dst + dim_y * pitch_dst + dim_x * 4 + 3) = 255;
}
}
|
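Both kernels in the pair above apply the same per-pixel YUV-to-RGB arithmetic (BT.601-style coefficients 1.402, 0.34414, 0.71414, 1.772 plus a +0.5 rounding term) before clamping to 8 bits. The host-side reference below is illustrative only and not part of the dataset; it isolates that arithmetic so a single pixel can be checked by hand, and it does not reproduce the kernels' planar YUV420P chroma addressing.

```cuda
#include <cstdio>

// Clamp a float to the 0..255 range, matching the device-side clamp() above.
static unsigned char clamp_u8(float t) {
    if (t < 0.0f) return 0;
    if (t > 255.0f) return 255;
    return static_cast<unsigned char>(t);
}

// Same per-pixel conversion the kernels perform, for one (Y, U, V) triple.
static void yuv_to_rgb(int y, int u, int v,
                       unsigned char &r, unsigned char &g, unsigned char &b) {
    r = clamp_u8(y + 1.402f   * (v - 128) + 0.5f);
    g = clamp_u8(y - 0.34414f * (u - 128) - 0.71414f * (v - 128) + 0.5f);
    b = clamp_u8(y + 1.772f   * (u - 128) + 0.5f);
}

int main() {
    unsigned char r, g, b;
    yuv_to_rgb(81, 90, 240, r, g, b);   // roughly a saturated red
    printf("R=%u G=%u B=%u\n", r, g, b);
    return 0;
}
```

Feeding a studio-range red sample (Y=81, U=90, V=240) gives approximately (238, 14, 14), a quick sanity check on the coefficients and the rounding term.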
0db8e0e9acb9585be731aeade67a68644fa3d93a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: ajs42
#include "ComputeThermoHMAGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
//! Shared memory used in reducing the sums
extern __shared__ Scalar3 compute_thermo_hma_sdata[];
//! Shared memory used in final reduction
extern __shared__ Scalar3 compute_thermo_hma_final_sdata[];
/*! \file ComputeThermoGPU.cu
\brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU.
*/
//! Perform partial sums of the thermo properties on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param box Box the particles are in
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_position Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group this GPU processes
\param offset Offset of this GPU in list of group members
\param block_offset Offset of this GPU in the array of partial sums
All partial sums are packaged up in a Scalar3 to keep pointer management down.
- force * dr is summed in .x
- Potential energy is summed in .y
- W is summed in .z
One thread is executed per group member. That thread reads in the values for its member into shared memory
and then the block performs a reduction in parallel to produce a partial sum output for the block. These
partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed
for this kernel to run.
*/
__global__ void gpu_compute_thermo_hma_partial_sums(Scalar3 *d_scratch,
BoxDim box,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const size_t virial_pitch,
Scalar4 *d_position,
Scalar3 *d_lattice_site,
int3 *d_image,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar3 my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = make_scalar3(0, 0, 0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
Scalar4 net_force = d_net_force[idx];
Scalar net_isotropic_virial;
// (1/3)*trace of virial tensor
net_isotropic_virial = Scalar(1.0/3.0)*
(d_net_virial[0*virial_pitch+idx] // xx
+d_net_virial[3*virial_pitch+idx] // yy
+d_net_virial[5*virial_pitch+idx]); // zz
Scalar4 pos4 = d_position[idx];
Scalar3 pos3 = make_scalar3(pos4.x, pos4.y, pos4.z);
Scalar3 lat = d_lattice_site[tag];
Scalar3 dr = box.shift(pos3, d_image[idx]) - lat;
double fdr = 0;
fdr += (double)d_net_force[idx].x * dr.x;
fdr += (double)d_net_force[idx].y * dr.y;
fdr += (double)d_net_force[idx].z * dr.z;
// compute our contribution to the sum
my_element.x = Scalar(fdr);
my_element.y = net_force.w;
my_element.z = net_isotropic_virial;
}
}
compute_thermo_hma_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_sdata[threadIdx.x].x += compute_thermo_hma_sdata[threadIdx.x + offs].x;
compute_thermo_hma_sdata[threadIdx.x].y += compute_thermo_hma_sdata[threadIdx.x + offs].y;
compute_thermo_hma_sdata[threadIdx.x].z += compute_thermo_hma_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
Scalar3 res = compute_thermo_hma_sdata[0];
d_scratch[block_offset + blockIdx.x] = make_scalar3(res.x, res.y, res.z);
}
}
//! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic contribution)
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param box Box the particles are in
\param D Dimensionality of the system
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param temperature The temperature that governs sampling of the integrator
\param harmonicPressure The contribution to the pressure from harmonic fluctuations
\param external_virial External contribution to virial (1/3 trace)
\param external_energy External contribution to potential energy
Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final
sums, the thermodynamic properties are computed and written to d_properties.
sizeof(Scalar3)*block_size bytes of shared memory are needed for this kernel to run.
*/
__global__ void gpu_compute_thermo_hma_final_sums(Scalar *d_properties,
Scalar3 *d_scratch,
BoxDim box,
unsigned int D,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar temperature,
Scalar harmonicPressure,
Scalar external_virial,
Scalar external_energy
)
{
Scalar3 final_sum = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
Scalar3 scratch = d_scratch[start + threadIdx.x];
compute_thermo_hma_final_sdata[threadIdx.x] = make_scalar3(scratch.x, scratch.y, scratch.z);
}
else
compute_thermo_hma_final_sdata[threadIdx.x] = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_final_sdata[threadIdx.x].x += compute_thermo_hma_final_sdata[threadIdx.x + offs].x;
compute_thermo_hma_final_sdata[threadIdx.x].y += compute_thermo_hma_final_sdata[threadIdx.x + offs].y;
compute_thermo_hma_final_sdata[threadIdx.x].z += compute_thermo_hma_final_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
final_sum.x += compute_thermo_hma_final_sdata[0].x;
final_sum.y += compute_thermo_hma_final_sdata[0].y;
final_sum.z += compute_thermo_hma_final_sdata[0].z;
}
}
if (threadIdx.x == 0)
{
// compute final quantities
Scalar fdr = final_sum.x;
Scalar pe_total = final_sum.y + external_energy;
Scalar W = final_sum.z + external_virial;
// compute the pressure
// volume/area & other 2D stuff needed
Scalar volume;
Scalar3 L = box.getL();
if (D == 2)
{
// "volume" is area in 2D
volume = L.x * L.y;
// W needs to be corrected since the 1/3 factor is built in
W *= Scalar(3.0)/Scalar(2.0);
}
else
{
volume = L.x * L.y * L.z;
}
// pressure: P = (N * K_B * T + W)/V
Scalar fV = (harmonicPressure/temperature - group_size/volume)/(D*(group_size-1));
Scalar pressure = harmonicPressure + W / volume + fV*fdr;
// fill out the GPUArray
d_properties[thermoHMA_index::potential_energyHMA] = pe_total + 1.5*(group_size-1)*temperature + 0.5*fdr;
d_properties[thermoHMA_index::pressureHMA] = pressure;
}
}
//! Compute partial sums of thermodynamic properties of a group on the GPU,
/*! \param d_pos Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param gpu_partition Load balancing info for multi-GPU reduction
This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details.
*/
hipError_t gpu_compute_thermo_hma_partial( Scalar4 *d_pos,
Scalar3 *d_lattice_site,
int3 *d_image,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args,
const GPUPartition& gpu_partition
)
{
assert(d_pos);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
unsigned int block_offset = 0;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
dim3 grid(nwork/args.block_size+1, 1, 1);
dim3 threads(args.block_size, 1, 1);
unsigned int shared_bytes = (unsigned int)(sizeof(Scalar3)*args.block_size);
hipLaunchKernelGGL(( gpu_compute_thermo_hma_partial_sums), dim3(grid),dim3(threads), shared_bytes, 0, args.d_scratch,
box,
args.d_net_force,
args.d_net_virial,
args.virial_pitch,
d_pos,
d_lattice_site,
d_image,
d_body,
d_tag,
d_group_members,
nwork,
range.first,
block_offset);
block_offset += grid.x;
}
assert(block_offset <= args.n_blocks);
return hipSuccess;
}
//! Compute thermodynamic properties of a group on the GPU
/*! \param d_properties Array to write computed properties
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param num_blocks Number of partial sums to reduce
This function drives gpu_compute_thermo_partial_sums and gpu_compute_thermo_final_sums, see them for details.
*/
hipError_t gpu_compute_thermo_hma_final(Scalar *d_properties,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args
)
{
assert(d_properties);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
// setup the grid to run the final kernel
int final_block_size = 512;
dim3 grid = dim3(1, 1, 1);
dim3 threads = dim3(final_block_size, 1, 1);
unsigned int shared_bytes = (unsigned int)(sizeof(Scalar3)*final_block_size);
Scalar external_virial = Scalar(1.0/3.0)*(args.external_virial_xx
+ args.external_virial_yy
+ args.external_virial_zz);
// run the kernel
hipLaunchKernelGGL(( gpu_compute_thermo_hma_final_sums), dim3(grid), dim3(threads), shared_bytes, 0, d_properties,
args.d_scratch,
box,
args.D,
group_size,
args.n_blocks,
args.temperature,
args.harmonicPressure,
external_virial,
args.external_energy);
return hipSuccess;
}
|
0db8e0e9acb9585be731aeade67a68644fa3d93a.cu
|
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: ajs42
#include "ComputeThermoHMAGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
//! Shared memory used in reducing the sums
extern __shared__ Scalar3 compute_thermo_hma_sdata[];
//! Shared memory used in final reduction
extern __shared__ Scalar3 compute_thermo_hma_final_sdata[];
/*! \file ComputeThermoGPU.cu
\brief Defines GPU kernel code for computing thermodynamic properties on the GPU. Used by ComputeThermoGPU.
*/
//! Perform partial sums of the thermo properties on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param box Box the particles are in
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_position Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group this GPU processes
\param offset Offset of this GPU in list of group members
\param block_offset Offset of this GPU in the array of partial sums
All partial sums are packaged up in a Scalar3 to keep pointer management down.
- force * dr is summed in .x
- Potential energy is summed in .y
- W is summed in .z
One thread is executed per group member. That thread reads in the values for its member into shared memory
and then the block performs a reduction in parallel to produce a partial sum output for the block. These
partial sums are written to d_scratch[blockIdx.x]. sizeof(Scalar3)*block_size of dynamic shared memory are needed
for this kernel to run.
*/
__global__ void gpu_compute_thermo_hma_partial_sums(Scalar3 *d_scratch,
BoxDim box,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const size_t virial_pitch,
Scalar4 *d_position,
Scalar3 *d_lattice_site,
int3 *d_image,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar3 my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = make_scalar3(0, 0, 0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx+offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
Scalar4 net_force = d_net_force[idx];
Scalar net_isotropic_virial;
// (1/3)*trace of virial tensor
net_isotropic_virial = Scalar(1.0/3.0)*
(d_net_virial[0*virial_pitch+idx] // xx
+d_net_virial[3*virial_pitch+idx] // yy
+d_net_virial[5*virial_pitch+idx]); // zz
Scalar4 pos4 = d_position[idx];
Scalar3 pos3 = make_scalar3(pos4.x, pos4.y, pos4.z);
Scalar3 lat = d_lattice_site[tag];
Scalar3 dr = box.shift(pos3, d_image[idx]) - lat;
double fdr = 0;
fdr += (double)d_net_force[idx].x * dr.x;
fdr += (double)d_net_force[idx].y * dr.y;
fdr += (double)d_net_force[idx].z * dr.z;
// compute our contribution to the sum
my_element.x = Scalar(fdr);
my_element.y = net_force.w;
my_element.z = net_isotropic_virial;
}
}
compute_thermo_hma_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_sdata[threadIdx.x].x += compute_thermo_hma_sdata[threadIdx.x + offs].x;
compute_thermo_hma_sdata[threadIdx.x].y += compute_thermo_hma_sdata[threadIdx.x + offs].y;
compute_thermo_hma_sdata[threadIdx.x].z += compute_thermo_hma_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
Scalar3 res = compute_thermo_hma_sdata[0];
d_scratch[block_offset + blockIdx.x] = make_scalar3(res.x, res.y, res.z);
}
}
//! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic contribution)
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param box Box the particles are in
\param D Dimensionality of the system
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param temperature The temperature that governs sampling of the integrator
\param harmonicPressure The contribution to the pressure from harmonic fluctuations
\param external_virial External contribution to virial (1/3 trace)
\param external_energy External contribution to potential energy
Only one block is executed. In that block, the partial sums are read in and reduced to final values. From the final
sums, the thermodynamic properties are computed and written to d_properties.
sizeof(Scalar3)*block_size bytes of shared memory are needed for this kernel to run.
*/
__global__ void gpu_compute_thermo_hma_final_sums(Scalar *d_properties,
Scalar3 *d_scratch,
BoxDim box,
unsigned int D,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar temperature,
Scalar harmonicPressure,
Scalar external_virial,
Scalar external_energy
)
{
Scalar3 final_sum = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
Scalar3 scratch = d_scratch[start + threadIdx.x];
compute_thermo_hma_final_sdata[threadIdx.x] = make_scalar3(scratch.x, scratch.y, scratch.z);
}
else
compute_thermo_hma_final_sdata[threadIdx.x] = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_final_sdata[threadIdx.x].x += compute_thermo_hma_final_sdata[threadIdx.x + offs].x;
compute_thermo_hma_final_sdata[threadIdx.x].y += compute_thermo_hma_final_sdata[threadIdx.x + offs].y;
compute_thermo_hma_final_sdata[threadIdx.x].z += compute_thermo_hma_final_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
final_sum.x += compute_thermo_hma_final_sdata[0].x;
final_sum.y += compute_thermo_hma_final_sdata[0].y;
final_sum.z += compute_thermo_hma_final_sdata[0].z;
}
}
if (threadIdx.x == 0)
{
// compute final quantities
Scalar fdr = final_sum.x;
Scalar pe_total = final_sum.y + external_energy;
Scalar W = final_sum.z + external_virial;
// compute the pressure
// volume/area & other 2D stuff needed
Scalar volume;
Scalar3 L = box.getL();
if (D == 2)
{
// "volume" is area in 2D
volume = L.x * L.y;
// W needs to be corrected since the 1/3 factor is built in
W *= Scalar(3.0)/Scalar(2.0);
}
else
{
volume = L.x * L.y * L.z;
}
        // HMA pressure estimate: P = harmonicPressure + W/V + fV * (F . dr)
Scalar fV = (harmonicPressure/temperature - group_size/volume)/(D*(group_size-1));
Scalar pressure = harmonicPressure + W / volume + fV*fdr;
// fill out the GPUArray
d_properties[thermoHMA_index::potential_energyHMA] = pe_total + 1.5*(group_size-1)*temperature + 0.5*fdr;
d_properties[thermoHMA_index::pressureHMA] = pressure;
}
}
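// Added summary (commentary, not in the original source): with fdr = sum_i F_i . (r_i - r_i^lattice)
// over the group, the estimators written above are
//   U_HMA = U + external_energy + (3/2)*(N-1)*T + (1/2)*fdr
//   P_HMA = harmonicPressure + W/V + fV*fdr,   fV = (harmonicPressure/T - N/V) / (D*(N-1))
// where W is the isotropic (1/3-trace) virial including the external contribution.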
//! Compute partial sums of thermodynamic properties of a group on the GPU
/*! \param d_pos Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param gpu_partition Load balancing info for multi-GPU reduction
    This function drives gpu_compute_thermo_hma_partial_sums; see it for details.
*/
hipError_t gpu_compute_thermo_hma_partial( Scalar4 *d_pos,
Scalar3 *d_lattice_site,
int3 *d_image,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args,
const GPUPartition& gpu_partition
)
{
assert(d_pos);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
unsigned int block_offset = 0;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
dim3 grid(nwork/args.block_size+1, 1, 1);
dim3 threads(args.block_size, 1, 1);
unsigned int shared_bytes = (unsigned int)(sizeof(Scalar3)*args.block_size);
gpu_compute_thermo_hma_partial_sums<<<grid,threads, shared_bytes>>>(args.d_scratch,
box,
args.d_net_force,
args.d_net_virial,
args.virial_pitch,
d_pos,
d_lattice_site,
d_image,
d_body,
d_tag,
d_group_members,
nwork,
range.first,
block_offset);
block_offset += grid.x;
}
assert(block_offset <= args.n_blocks);
return hipSuccess;
}
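// Added note: each GPU launches nwork/block_size + 1 blocks, and every block writes one Scalar3
// partial sum to d_scratch[block_offset + blockIdx.x]; args.d_scratch therefore needs room for at
// least args.n_blocks elements across all GPUs, which is what the assert above verifies.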
//! Compute thermodynamic properties of a group on the GPU
/*! \param d_properties Array to write computed properties
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
    \param args Additional arguments (args.n_blocks gives the number of partial sums to reduce)
    This function drives gpu_compute_thermo_hma_final_sums; see it for details.
*/
hipError_t gpu_compute_thermo_hma_final(Scalar *d_properties,
unsigned int *d_body,
unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args
)
{
assert(d_properties);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
// setup the grid to run the final kernel
int final_block_size = 512;
dim3 grid = dim3(1, 1, 1);
dim3 threads = dim3(final_block_size, 1, 1);
unsigned int shared_bytes = (unsigned int)(sizeof(Scalar3)*final_block_size);
Scalar external_virial = Scalar(1.0/3.0)*(args.external_virial_xx
+ args.external_virial_yy
+ args.external_virial_zz);
// run the kernel
gpu_compute_thermo_hma_final_sums<<<grid, threads, shared_bytes>>>(d_properties,
args.d_scratch,
box,
args.D,
group_size,
args.n_blocks,
args.temperature,
args.harmonicPressure,
external_virial,
args.external_energy);
return hipSuccess;
}
|
c80b263b0b17c180061024a6ebbc6de55ab7abe0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#define THREAD_BLOCK_SIZE 256
template <typename Dtype>
__global__ void mean_statistic(const int num, const int map_size, const int channels,
Dtype stat_ratio, bool save_mean, bool moving_mean, Dtype decay, Dtype com_decay,
const Dtype* in, Dtype* mean, Dtype* history_mean, Dtype* out, int norm_size) {
__shared__ Dtype buffer[THREAD_BLOCK_SIZE];
buffer[threadIdx.x] = 0;
if(!moving_mean) {
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size)
buffer[threadIdx.x] += in[location];
}
__syncthreads();
for(int i = blockDim.x / 2; i > 0; i >>= 1) {
if(threadIdx.x < i) buffer[threadIdx.x] += buffer[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0) {
buffer[0] = buffer[0] * stat_ratio;
if(save_mean) mean[blockIdx.x] += (decay * buffer[0] + com_decay * history_mean[blockIdx.x]) / norm_size;
}
}
else if(threadIdx.x == 0)
buffer[0] = history_mean[blockIdx.x];
__syncthreads();
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size)
out[location] = in[location] - buffer[0];
}
}
template <typename Dtype>
__global__ void var_statistic(const int num, const int map_size, const int channels,
Dtype in_pow, Dtype stat_ratio, Dtype stat_eps, Dtype stat_pow,
bool save_mean, bool moving_mean, Dtype decay, Dtype com_decay,
const Dtype* in, Dtype* mean, Dtype* history_mean, Dtype* out,
Dtype* x_norm,Dtype* x_std, const Dtype* scale,const Dtype* shift, int norm_size) {
__shared__ Dtype buffer[THREAD_BLOCK_SIZE];
buffer[threadIdx.x] = 0;
if(!moving_mean) {
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size)
buffer[threadIdx.x] += pow(in[location],in_pow);
}
__syncthreads();
for(int i = blockDim.x/2; i > 0; i >>= 1) {
if(threadIdx.x < i) buffer[threadIdx.x] += buffer[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0) {
buffer[0] = buffer[0] * stat_ratio;
if(save_mean) mean[blockIdx.x] += (decay * buffer[0] + com_decay * history_mean[blockIdx.x]) / norm_size;
}
}
else if(threadIdx.x == 0)
buffer[0] = history_mean[blockIdx.x];
__syncthreads();
Dtype temp = pow(buffer[0] + stat_eps, stat_pow);
Dtype scale_value = scale[blockIdx.x], shift_value = shift[blockIdx.x];
if(threadIdx.x == 0) x_std[blockIdx.x] = temp;
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size) {
x_norm[location] = in[location] / temp;
out[location] = in[location] / temp * scale_value + shift_value;
}
}
}
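// Added note: taken together, mean_statistic and var_statistic implement the per-channel BN
// forward pass y = scale * (x - mean) / (var + eps)^0.5 + shift (in_pow = 2 and stat_pow = 0.5
// are passed from Forward_gpu below); x_norm caches (x - mean)/std and x_std caches the
// per-channel standard deviation for use in the backward pass.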
template <typename Dtype>
void BNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int num_ = bottom[0]->num();
int channels_ = bottom[0]->channels();
int height_ = bottom[0]->height();
int width_ = bottom[0]->width();
const Dtype* const_bottom_data = bottom[0]->gpu_data();
const Dtype* const_top_data = top[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
const Dtype* shift_data = this->blobs_[1]->gpu_data();
bool save_mean = this->phase_ == TRAIN && this->param_propagate_down_[0];
hipLaunchKernelGGL(( mean_statistic<Dtype>), dim3(channels_), dim3(THREAD_BLOCK_SIZE), 0, 0, num_, height_ * width_, channels_,
Dtype(1. / (height_ * width_ * num_)),save_mean,
(this->phase_ == TEST || !this->param_propagate_down_[0]) && moving_average_, decay_, Dtype(1) - decay_,
const_bottom_data, this->blobs_[2]->mutable_gpu_diff(),
this->blobs_[2]->mutable_gpu_data(), top_data, Caffe::getIterSize() * Caffe::getThreadNum());
CUDA_POST_KERNEL_CHECK;
/*
hipLaunchKernelGGL(( var_statistic<Dtype>), dim3(channels_), dim3(THREAD_BLOCK_SIZE), 0, 0, num_, height_ * width_, channels_, Dtype(2),
Dtype(1. / (height_ * width_ * num_)), var_eps_, Dtype(0.5),
save_mean, (this->phase_ == TEST || !this->param_propagate_down_[0]) && moving_average_,
(num_)*decay_/(num_-1), Dtype(1)-(num_)*decay_/(num_-1),
const_top_data, this->blobs_[3]->mutable_gpu_diff(),this->blobs_[3]->mutable_gpu_data(),
top_data,x_norm_.mutable_gpu_data(),x_std_.mutable_gpu_data(),
scale_data,shift_data, Caffe::getIterSize() * Caffe::getThreadNum());
*/
hipLaunchKernelGGL(( var_statistic<Dtype>), dim3(channels_), dim3(THREAD_BLOCK_SIZE), 0, 0, num_, height_ * width_, channels_, Dtype(2),
Dtype(1. / (height_ * width_ * num_)), var_eps_, Dtype(0.5),
save_mean, (this->phase_ == TEST || !this->param_propagate_down_[0]) && moving_average_,
decay_, Dtype(1)-decay_,
const_top_data, this->blobs_[3]->mutable_gpu_diff(),this->blobs_[3]->mutable_gpu_data(),
top_data,x_norm_.mutable_gpu_data(),x_std_.mutable_gpu_data(),
scale_data,shift_data, Caffe::getIterSize() * Caffe::getThreadNum());
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void scale_shift_bottom_gradient(const int num, const int map_size, const int channels,
const Dtype* in, const Dtype* x_norm, Dtype* scale_diff, Dtype* shift_diff, const Dtype* scale_data,
const Dtype* x_std, Dtype* out) {
__shared__ Dtype buffer_scale_diff[THREAD_BLOCK_SIZE];
__shared__ Dtype buffer_shift_diff[THREAD_BLOCK_SIZE];
buffer_scale_diff[threadIdx.x] = 0;
buffer_shift_diff[threadIdx.x] = 0;
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size){
buffer_scale_diff[threadIdx.x] += (in[location] * x_norm[location]);
buffer_shift_diff[threadIdx.x] += in[location];
}
}
__syncthreads();
for(int i = blockDim.x / 2; i > 0; i >>= 1) {
if(threadIdx.x < i) buffer_scale_diff[threadIdx.x] += buffer_scale_diff[threadIdx.x + i];
if(threadIdx.x < i) buffer_shift_diff[threadIdx.x] += buffer_shift_diff[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0) {
scale_diff[blockIdx.x] = buffer_scale_diff[0];
shift_diff[blockIdx.x] = buffer_shift_diff[0];
}
__syncthreads();
Dtype s_data_v = scale_data[blockIdx.x], x_std_v = x_std[blockIdx.x];
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size) {
out[location] = s_data_v * (in[location] - (x_norm[location] *
buffer_scale_diff[0] + buffer_shift_diff[0]) / (num * map_size)) / x_std_v;
}
}
}
template <typename Dtype>
void BNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int num_ = bottom[0]->num();
int channels_ = bottom[0]->channels();
int height_ = bottom[0]->height();
int width_ = bottom[0]->width();
const Dtype* const_bottom_diff = bottom[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* const_top_diff = top[0]->gpu_diff();
Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* shift_diff = this->blobs_[1]->mutable_gpu_diff();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
if (this->param_propagate_down_[0] && propagate_down[0]) {
hipLaunchKernelGGL(( scale_shift_bottom_gradient<Dtype>), dim3(channels_), dim3(THREAD_BLOCK_SIZE), 0, 0, num_, height_ * width_, channels_,
const_top_diff, x_norm_.gpu_data(), scale_diff, shift_diff, scale_data, x_std_.gpu_data(), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
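  // Added note: the running statistics accumulated into the diff buffers of blobs_[2]/blobs_[3]
  // during Forward_gpu appear to be committed to their data buffers here.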
caffe_copy(this->blobs_[2]->count(), this->blobs_[2]->gpu_diff(), this->blobs_[2]->mutable_gpu_data());
caffe_copy(this->blobs_[3]->count(), this->blobs_[3]->gpu_diff(), this->blobs_[3]->mutable_gpu_data());
}
//INSTANTIATE_CLASS(BNLayer);
INSTANTIATE_LAYER_GPU_FUNCS(BNLayer);
} // namespace caffe
|
c80b263b0b17c180061024a6ebbc6de55ab7abe0.cu
|
#include <algorithm>
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
#define THREAD_BLOCK_SIZE 256
template <typename Dtype>
__global__ void mean_statistic(const int num, const int map_size, const int channels,
Dtype stat_ratio, bool save_mean, bool moving_mean, Dtype decay, Dtype com_decay,
const Dtype* in, Dtype* mean, Dtype* history_mean, Dtype* out, int norm_size) {
__shared__ Dtype buffer[THREAD_BLOCK_SIZE];
buffer[threadIdx.x] = 0;
if(!moving_mean) {
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size)
buffer[threadIdx.x] += in[location];
}
__syncthreads();
for(int i = blockDim.x / 2; i > 0; i >>= 1) {
if(threadIdx.x < i) buffer[threadIdx.x] += buffer[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0) {
buffer[0] = buffer[0] * stat_ratio;
if(save_mean) mean[blockIdx.x] += (decay * buffer[0] + com_decay * history_mean[blockIdx.x]) / norm_size;
}
}
else if(threadIdx.x == 0)
buffer[0] = history_mean[blockIdx.x];
__syncthreads();
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size)
out[location] = in[location] - buffer[0];
}
}
template <typename Dtype>
__global__ void var_statistic(const int num, const int map_size, const int channels,
Dtype in_pow, Dtype stat_ratio, Dtype stat_eps, Dtype stat_pow,
bool save_mean, bool moving_mean, Dtype decay, Dtype com_decay,
const Dtype* in, Dtype* mean, Dtype* history_mean, Dtype* out,
Dtype* x_norm,Dtype* x_std, const Dtype* scale,const Dtype* shift, int norm_size) {
__shared__ Dtype buffer[THREAD_BLOCK_SIZE];
buffer[threadIdx.x] = 0;
if(!moving_mean) {
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size)
buffer[threadIdx.x] += pow(in[location],in_pow);
}
__syncthreads();
for(int i = blockDim.x/2; i > 0; i >>= 1) {
if(threadIdx.x < i) buffer[threadIdx.x] += buffer[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0) {
buffer[0] = buffer[0] * stat_ratio;
if(save_mean) mean[blockIdx.x] += (decay * buffer[0] + com_decay * history_mean[blockIdx.x]) / norm_size;
}
}
else if(threadIdx.x == 0)
buffer[0] = history_mean[blockIdx.x];
__syncthreads();
Dtype temp = pow(buffer[0] + stat_eps, stat_pow);
Dtype scale_value = scale[blockIdx.x], shift_value = shift[blockIdx.x];
if(threadIdx.x == 0) x_std[blockIdx.x] = temp;
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size) {
x_norm[location] = in[location] / temp;
out[location] = in[location] / temp * scale_value + shift_value;
}
}
}
template <typename Dtype>
void BNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int num_ = bottom[0]->num();
int channels_ = bottom[0]->channels();
int height_ = bottom[0]->height();
int width_ = bottom[0]->width();
const Dtype* const_bottom_data = bottom[0]->gpu_data();
const Dtype* const_top_data = top[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
const Dtype* shift_data = this->blobs_[1]->gpu_data();
bool save_mean = this->phase_ == TRAIN && this->param_propagate_down_[0];
mean_statistic<Dtype><<<channels_, THREAD_BLOCK_SIZE>>>(num_, height_ * width_, channels_,
Dtype(1. / (height_ * width_ * num_)),save_mean,
(this->phase_ == TEST || !this->param_propagate_down_[0]) && moving_average_, decay_, Dtype(1) - decay_,
const_bottom_data, this->blobs_[2]->mutable_gpu_diff(),
this->blobs_[2]->mutable_gpu_data(), top_data, Caffe::getIterSize() * Caffe::getThreadNum());
CUDA_POST_KERNEL_CHECK;
/*
var_statistic<Dtype><<<channels_, THREAD_BLOCK_SIZE>>>(num_, height_ * width_, channels_, Dtype(2),
Dtype(1. / (height_ * width_ * num_)), var_eps_, Dtype(0.5),
save_mean, (this->phase_ == TEST || !this->param_propagate_down_[0]) && moving_average_,
(num_)*decay_/(num_-1), Dtype(1)-(num_)*decay_/(num_-1),
const_top_data, this->blobs_[3]->mutable_gpu_diff(),this->blobs_[3]->mutable_gpu_data(),
top_data,x_norm_.mutable_gpu_data(),x_std_.mutable_gpu_data(),
scale_data,shift_data, Caffe::getIterSize() * Caffe::getThreadNum());
*/
var_statistic<Dtype><<<channels_, THREAD_BLOCK_SIZE>>>(num_, height_ * width_, channels_, Dtype(2),
Dtype(1. / (height_ * width_ * num_)), var_eps_, Dtype(0.5),
save_mean, (this->phase_ == TEST || !this->param_propagate_down_[0]) && moving_average_,
decay_, Dtype(1)-decay_,
const_top_data, this->blobs_[3]->mutable_gpu_diff(),this->blobs_[3]->mutable_gpu_data(),
top_data,x_norm_.mutable_gpu_data(),x_std_.mutable_gpu_data(),
scale_data,shift_data, Caffe::getIterSize() * Caffe::getThreadNum());
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void scale_shift_bottom_gradient(const int num, const int map_size, const int channels,
const Dtype* in, const Dtype* x_norm, Dtype* scale_diff, Dtype* shift_diff, const Dtype* scale_data,
const Dtype* x_std, Dtype* out) {
__shared__ Dtype buffer_scale_diff[THREAD_BLOCK_SIZE];
__shared__ Dtype buffer_shift_diff[THREAD_BLOCK_SIZE];
buffer_scale_diff[threadIdx.x] = 0;
buffer_shift_diff[threadIdx.x] = 0;
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size){
buffer_scale_diff[threadIdx.x] += (in[location] * x_norm[location]);
buffer_shift_diff[threadIdx.x] += in[location];
}
}
__syncthreads();
for(int i = blockDim.x / 2; i > 0; i >>= 1) {
if(threadIdx.x < i) buffer_scale_diff[threadIdx.x] += buffer_scale_diff[threadIdx.x + i];
if(threadIdx.x < i) buffer_shift_diff[threadIdx.x] += buffer_shift_diff[threadIdx.x + i];
__syncthreads();
}
if(threadIdx.x == 0) {
scale_diff[blockIdx.x] = buffer_scale_diff[0];
shift_diff[blockIdx.x] = buffer_shift_diff[0];
}
__syncthreads();
Dtype s_data_v = scale_data[blockIdx.x], x_std_v = x_std[blockIdx.x];
for(int i = threadIdx.x; i < num * map_size; i += blockDim.x) {
int location = i / map_size * map_size * channels + (i % map_size) + blockIdx.x * map_size;
if(i < num * map_size) {
out[location] = s_data_v * (in[location] - (x_norm[location] *
buffer_scale_diff[0] + buffer_shift_diff[0]) / (num * map_size)) / x_std_v;
}
}
}
template <typename Dtype>
void BNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int num_ = bottom[0]->num();
int channels_ = bottom[0]->channels();
int height_ = bottom[0]->height();
int width_ = bottom[0]->width();
const Dtype* const_bottom_diff = bottom[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* const_top_diff = top[0]->gpu_diff();
Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* shift_diff = this->blobs_[1]->mutable_gpu_diff();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
if (this->param_propagate_down_[0] && propagate_down[0]) {
scale_shift_bottom_gradient<Dtype><<<channels_, THREAD_BLOCK_SIZE>>>(num_, height_ * width_, channels_,
const_top_diff, x_norm_.gpu_data(), scale_diff, shift_diff, scale_data, x_std_.gpu_data(), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
caffe_copy(this->blobs_[2]->count(), this->blobs_[2]->gpu_diff(), this->blobs_[2]->mutable_gpu_data());
caffe_copy(this->blobs_[3]->count(), this->blobs_[3]->gpu_diff(), this->blobs_[3]->mutable_gpu_data());
}
//INSTANTIATE_CLASS(BNLayer);
INSTANTIATE_LAYER_GPU_FUNCS(BNLayer);
} // namespace caffe
|
4a9888ea47a5beb72130c8285126426e73ab050e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/OpMathType.h>
#include <ATen/native/hip/GridSampler.h>
#include <ATen/native/GridSamplerUtils.h>
#include <ATen/native/hip/GridSampler.cuh>
#include <ATen/native/hip/UpSample.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <c10/macros/Macros.h>
#include <cmath>
namespace at::native {
using namespace at::cuda::detail;
using at::native::detail::GridSamplerInterpolation;
using at::native::detail::GridSamplerPadding;
namespace {
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_2d_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
using opmath_t = at::opmath_type<scalar_t>;
index_t C = input.sizes[1];
index_t inp_H = input.sizes[2];
index_t inp_W = input.sizes[3];
index_t out_H = grid.sizes[1];
index_t out_W = grid.sizes[2];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sH = input.strides[2];
index_t inp_sW = input.strides[3];
index_t grid_sN = grid.strides[0];
index_t grid_sH = grid.strides[1];
index_t grid_sW = grid.strides[2];
index_t grid_sCoor = grid.strides[3];
index_t out_sN = output.strides[0];
index_t out_sC = output.strides[1];
index_t out_sH = output.strides[2];
index_t out_sW = output.strides[3];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t n = index / (out_H * out_W);
const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
opmath_t x = grid.data[grid_offset];
opmath_t y = grid.data[grid_offset + grid_sCoor];
opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners);
opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners);
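    // Added note: grid_sampler_compute_source_index (from the included GridSampler.cuh) maps the
    // normalized coordinate in [-1, 1] to pixel space, ((x + 1)/2)*(inp_W - 1) when align_corners
    // is true and ((x + 1)*inp_W - 1)/2 otherwise, then applies the requested padding mode.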
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
index_t ix_nw = static_cast<index_t>(::floor(ix));
index_t iy_nw = static_cast<index_t>(::floor(iy));
index_t ix_ne = ix_nw + 1;
index_t iy_ne = iy_nw;
index_t ix_sw = ix_nw;
index_t iy_sw = iy_nw + 1;
index_t ix_se = ix_nw + 1;
index_t iy_se = iy_nw + 1;
// get surfaces to each neighbor:
opmath_t nw = (ix_se - ix) * (iy_se - iy);
opmath_t ne = (ix - ix_sw) * (iy_sw - iy);
opmath_t sw = (ix_ne - ix) * (iy - iy_ne);
opmath_t se = (ix - ix_nw) * (iy - iy_nw);
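    // Added note: nw/ne/sw/se are the areas of the sub-rectangles opposite each corner, so they
    // sum to 1 and the loop below computes the standard bilinear blend
    //   out = nw*I(nw) + ne*I(ne) + sw*I(sw) + se*I(se),
    // with out-of-bounds corners contributing zero.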
// calculate bilinear weighted pixel value and set output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
opmath_t out_acc = 0;
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se;
}
*out_ptr_NCHW = out_acc;
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
index_t ix_nearest = static_cast<index_t>(std::nearbyint(ix));
index_t iy_nearest = static_cast<index_t>(std::nearbyint(iy));
      // assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) {
*out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCHW = static_cast<scalar_t>(0);
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Bicubic) {
ix = grid_sampler_unnormalize(x, inp_W, align_corners);
iy = grid_sampler_unnormalize(y, inp_H, align_corners);
opmath_t ix_nw = ::floor(ix);
opmath_t iy_nw = ::floor(iy);
const opmath_t tx = ix - ix_nw;
const opmath_t ty = iy - iy_nw;
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
opmath_t coefficients[4];
#pragma unroll 4
for (index_t i = 0; i < 4; ++i) {
coefficients[i] = cubic_interp1d(
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
tx);
}
*out_ptr_NCHW = cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
ty);
}
}
}
}
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(512)
__global__ void grid_sampler_3d_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
using opmath_t = at::opmath_type<scalar_t>;
index_t C = input.sizes[1];
index_t inp_D = input.sizes[2];
index_t inp_H = input.sizes[3];
index_t inp_W = input.sizes[4];
index_t out_D = grid.sizes[1];
index_t out_H = grid.sizes[2];
index_t out_W = grid.sizes[3];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sD = input.strides[2];
index_t inp_sH = input.strides[3];
index_t inp_sW = input.strides[4];
index_t grid_sN = grid.strides[0];
index_t grid_sD = grid.strides[1];
index_t grid_sH = grid.strides[2];
index_t grid_sW = grid.strides[3];
index_t grid_sCoor = grid.strides[4];
index_t out_sN = output.strides[0];
index_t out_sC = output.strides[1];
index_t out_sD = output.strides[2];
index_t out_sH = output.strides[3];
index_t out_sW = output.strides[4];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t d = (index / (out_H * out_W)) % out_D;
const index_t n = index / (out_D * out_H * out_W);
const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
opmath_t x = grid.data[grid_offset];
opmath_t y = grid.data[grid_offset + grid_sCoor];
opmath_t z = grid.data[grid_offset + 2 * grid_sCoor];
opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners);
opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners);
opmath_t iz = grid_sampler_compute_source_index(z, inp_D, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we used north-east-south-west
// for 5d, we add top-bottom
index_t ix_tnw = static_cast<index_t>(::floor(ix));
index_t iy_tnw = static_cast<index_t>(::floor(iy));
index_t iz_tnw = static_cast<index_t>(::floor(iz));
index_t ix_tne = ix_tnw + 1;
index_t iy_tne = iy_tnw;
index_t iz_tne = iz_tnw;
index_t ix_tsw = ix_tnw;
index_t iy_tsw = iy_tnw + 1;
index_t iz_tsw = iz_tnw;
index_t ix_tse = ix_tnw + 1;
index_t iy_tse = iy_tnw + 1;
index_t iz_tse = iz_tnw;
index_t ix_bnw = ix_tnw;
index_t iy_bnw = iy_tnw;
index_t iz_bnw = iz_tnw + 1;
index_t ix_bne = ix_tnw + 1;
index_t iy_bne = iy_tnw;
index_t iz_bne = iz_tnw + 1;
index_t ix_bsw = ix_tnw;
index_t iy_bsw = iy_tnw + 1;
index_t iz_bsw = iz_tnw + 1;
index_t ix_bse = ix_tnw + 1;
index_t iy_bse = iy_tnw + 1;
index_t iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
opmath_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
opmath_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
opmath_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
opmath_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
opmath_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
opmath_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
opmath_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
opmath_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
// (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne
// + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse
// + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne
// + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse
opmath_t out_acc = 0;
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse;
}
*out_ptr_NCDHW = out_acc;
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
index_t ix_nearest = static_cast<index_t>(std::round(ix));
index_t iy_nearest = static_cast<index_t>(std::round(iy));
index_t iz_nearest = static_cast<index_t>(std::round(iz));
      // assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCDHW = static_cast<scalar_t>(0);
}
}
}
}
}
// Note [Passing pointer and offset to fastAtomicAdd]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// For its internal bounds checking, fastAtomicAdd needs to know where the destination address
// lies relative to the entire tensor, so we pass the base grad_input.data and full offset information,
// including batch * channel offset (NC_offset).
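// Added note: illustratively, in the 2d backward kernel below the element updated for (n, c, iy, ix)
// sits at offset NC_offset + iy*gInp_sH + ix*gInp_sW from grad_input.data, where NC_offset starts at
// n*gInp_sN and advances by gInp_sC in the channel loop; passing the base pointer plus this full
// offset, rather than a pre-offset pointer, is what allows bounds checking against
// grad_input_memory_span.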
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_2d_backward_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> grad_output,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false)
TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners,
const index_t grad_input_memory_span,
const bool input_requires_grad) {
index_t C = input.sizes[1];
index_t inp_H = input.sizes[2];
index_t inp_W = input.sizes[3];
index_t out_H = grid.sizes[1];
index_t out_W = grid.sizes[2];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sH = input.strides[2];
index_t inp_sW = input.strides[3];
index_t grid_sN = grid.strides[0];
index_t grid_sH = grid.strides[1];
index_t grid_sW = grid.strides[2];
index_t grid_sCoor = grid.strides[3];
index_t gOut_sN = grad_output.strides[0];
index_t gOut_sC = grad_output.strides[1];
index_t gOut_sH = grad_output.strides[2];
index_t gOut_sW = grad_output.strides[3];
// gInp_* (and NC_offset below) are not really needed if input_requires_grad is false.
index_t gInp_sN;
index_t gInp_sC;
index_t gInp_sH;
index_t gInp_sW;
if (input_requires_grad) {
gInp_sN = grad_input.strides[0];
gInp_sC = grad_input.strides[1];
gInp_sH = grad_input.strides[2];
gInp_sW = grad_input.strides[3];
}
index_t gGrid_sW = grad_grid.strides[2];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t n = index / (out_H * out_W);
const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t x = grid.data[grid_offset];
scalar_t y = grid.data[grid_offset + grid_sCoor];
// multipliers for gradients on ix and iy
scalar_t gix_mult, giy_mult;
scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult);
scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
index_t ix_nw = static_cast<index_t>(::floor(ix));
index_t iy_nw = static_cast<index_t>(::floor(iy));
index_t ix_ne = ix_nw + 1;
index_t iy_ne = iy_nw;
index_t ix_sw = ix_nw;
index_t iy_sw = iy_nw + 1;
index_t ix_se = ix_nw + 1;
index_t iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
if (input_requires_grad) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span);
safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span);
safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span);
safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span);
}
// calculate grad_grid
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW];
gix -= nw_val * (iy_se - iy) * gOut;
giy -= nw_val * (ix_se - ix) * gOut;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW];
gix += ne_val * (iy_sw - iy) * gOut;
giy -= ne_val * (ix - ix_sw) * gOut;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW];
gix -= sw_val * (iy - iy_ne) * gOut;
giy += sw_val * (ix_ne - ix) * gOut;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW];
gix += se_val * (iy - iy_nw) * gOut;
giy += se_val * (ix - ix_nw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
if (input_requires_grad) {
index_t ix_nearest = static_cast<index_t>(std::round(ix));
index_t iy_nearest = static_cast<index_t>(std::round(iy));
        // assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span);
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NHW[1] = static_cast<scalar_t>(0);
} else if (interpolation_mode == GridSamplerInterpolation::Bicubic) {
ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult);
iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult);
scalar_t ix_nw = ::floor(ix);
scalar_t iy_nw = ::floor(iy);
const scalar_t tx = ix - ix_nw;
const scalar_t ty = iy - iy_nw;
scalar_t x_coeffs[4];
scalar_t y_coeffs[4];
scalar_t x_coeffs_grad[4];
scalar_t y_coeffs_grad[4];
get_cubic_upsampling_coefficients<scalar_t>(x_coeffs, tx);
get_cubic_upsampling_coefficients<scalar_t>(y_coeffs, ty);
get_cubic_coefficients_grad<scalar_t>(x_coeffs_grad, tx);
get_cubic_coefficients_grad<scalar_t>(y_coeffs_grad, ty);
scalar_t gix = static_cast<scalar_t>(0);
scalar_t giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
#pragma unroll 4
for (index_t i = 0; i < 4; ++i) {
#pragma unroll 4
for (index_t j = 0; j < 4; ++j) {
if (input_requires_grad) {
// set input gradient. See Note [Passing pointer and offset to fastAtomicAdd].
add_value_bounded<scalar_t>(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH,
gOut * x_coeffs[i] * y_coeffs[j],
padding_mode,
align_corners,
NC_offset,
grad_input_memory_span);
}
// set grid gradient
scalar_t val = get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j,
inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners);
gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut;
giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut;
}
}
}
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
}
}
}
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_3d_backward_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> grad_output,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false)
TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners,
const index_t grad_input_memory_span,
const bool input_requires_grad) {
index_t C = input.sizes[1];
index_t inp_D = input.sizes[2];
index_t inp_H = input.sizes[3];
index_t inp_W = input.sizes[4];
index_t out_D = grid.sizes[1];
index_t out_H = grid.sizes[2];
index_t out_W = grid.sizes[3];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sD = input.strides[2];
index_t inp_sH = input.strides[3];
index_t inp_sW = input.strides[4];
index_t grid_sN = grid.strides[0];
index_t grid_sD = grid.strides[1];
index_t grid_sH = grid.strides[2];
index_t grid_sW = grid.strides[3];
index_t grid_sCoor = grid.strides[4];
index_t gOut_sN = grad_output.strides[0];
index_t gOut_sC = grad_output.strides[1];
index_t gOut_sD = grad_output.strides[2];
index_t gOut_sH = grad_output.strides[3];
index_t gOut_sW = grad_output.strides[4];
// gInp_* (and NC_offset below) are not really needed if input_requires_grad is false.
int64_t gInp_sN = 0;
int64_t gInp_sC = 0;
int64_t gInp_sD = 0;
int64_t gInp_sH = 0;
int64_t gInp_sW = 0;
if (input_requires_grad) {
gInp_sN = grad_input.strides[0];
gInp_sC = grad_input.strides[1];
gInp_sD = grad_input.strides[2];
gInp_sH = grad_input.strides[3];
gInp_sW = grad_input.strides[4];
}
index_t gGrid_sW = grad_grid.strides[3];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t d = (index / (out_H * out_W)) % out_D;
const index_t n = index / (out_D * out_H * out_W);
const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
// multipliers for gradients on ix, iy, and iz
scalar_t gix_mult, giy_mult, giz_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we used north-east-south-west
// for 5d, we add top-bottom
index_t ix_tnw = static_cast<index_t>(::floor(ix));
index_t iy_tnw = static_cast<index_t>(::floor(iy));
index_t iz_tnw = static_cast<index_t>(::floor(iz));
index_t ix_tne = ix_tnw + 1;
index_t iy_tne = iy_tnw;
index_t iz_tne = iz_tnw;
index_t ix_tsw = ix_tnw;
index_t iy_tsw = iy_tnw + 1;
index_t iz_tsw = iz_tnw;
index_t ix_tse = ix_tnw + 1;
index_t iy_tse = iy_tnw + 1;
index_t iz_tse = iz_tnw;
index_t ix_bnw = ix_tnw;
index_t iy_bnw = iy_tnw;
index_t iz_bnw = iz_tnw + 1;
index_t ix_bne = ix_tnw + 1;
index_t iy_bne = iy_tnw;
index_t iz_bne = iz_tnw + 1;
index_t ix_bsw = ix_tnw;
index_t iy_bsw = iy_tnw + 1;
index_t iz_bsw = iz_tnw + 1;
index_t ix_bse = ix_tnw + 1;
index_t iy_bse = iy_tnw + 1;
index_t iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
index_t NC_offset;
if (input_requires_grad) {
NC_offset = n * gInp_sN;
}
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// calculate bilinear weighted pixel value and set output pixel
for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
if (input_requires_grad) {
safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut,
NC_offset, grad_input_memory_span);
}
// calculate grad_grid
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW];
gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW];
gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW];
gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW];
gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW];
gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW];
gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW];
gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW];
gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = gix_mult * gix;
gGrid_ptr_NDHW[1] = giy_mult * giy;
gGrid_ptr_NDHW[2] = giz_mult * giz;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
if (input_requires_grad) {
auto ix_nearest = static_cast<index_t>(std::round(ix));
auto iy_nearest = static_cast<index_t>(std::round(iy));
auto iz_nearest = static_cast<index_t>(std::round(iz));
        // assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest,
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW,
NC_offset, grad_input_memory_span);
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0);
}
}
}
} // namespace
void launch_grid_sampler_2d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_2d(input, grid);
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
int64_t count = N * H * W;
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(output)) {
hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
static_cast<int>(count),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
getTensorInfo<scalar_t, int64_t>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
}
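// Added note: the first branch above is taken when input, grid and output can all be addressed with
// 32-bit indices (canUse32BitIndexMath), presumably because int index arithmetic is cheaper on the
// GPU; otherwise the same kernel is instantiated with index_t = int64_t.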
void launch_grid_sampler_3d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_3d(input, grid, interpolation_mode);
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
int64_t count = N * D * H * W;
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(output)) {
hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 512)), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
static_cast<int>(count),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 512)), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
getTensorInfo<scalar_t, int64_t>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
}
void launch_grid_sampler_2d_backward_kernel(
const TensorBase &grad_input, const TensorBase &grad_grid,
const TensorBase &grad_output, const TensorBase &input,
const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool,2> output_mask) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_2d(input, grid);
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda");
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
// If `input` gradient is not required, we skip computing it -- not needing to create
// the tensor to hold the gradient can markedly increase performance. (`grid` gradient
// is always computed.)
auto input_requires_grad = output_mask[0];
int64_t count = N * H * W;
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(grad_output)) {
hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
static_cast<int>(count),
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0,
input_requires_grad);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int64_t>(grad_output),
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(),
getTensorInfo<scalar_t, int64_t>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0,
input_requires_grad);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
}
void launch_grid_sampler_3d_backward_kernel(
const TensorBase &grad_input, const TensorBase &grad_grid,
const TensorBase& grad_output, const TensorBase& input,
const TensorBase& grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool,2> output_mask) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_3d(input, grid, interpolation_mode);
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda");
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
int64_t count = N * D * H * W;
auto input_requires_grad = output_mask[0];
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(grad_output)) {
hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
static_cast<int>(count),
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0,
input_requires_grad);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>)
, dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int64_t>(grad_output),
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(),
getTensorInfo<scalar_t, int64_t>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0,
input_requires_grad);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
}
} // namespace at::native
|
4a9888ea47a5beb72130c8285126426e73ab050e.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/OpMathType.h>
#include <ATen/native/cuda/GridSampler.h>
#include <ATen/native/GridSamplerUtils.h>
#include <ATen/native/cuda/GridSampler.cuh>
#include <ATen/native/cuda/UpSample.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/core/TensorBase.h>
#include <ATen/Dispatch.h>
#include <c10/macros/Macros.h>
#include <cmath>
namespace at::native {
using namespace at::cuda::detail;
using at::native::detail::GridSamplerInterpolation;
using at::native::detail::GridSamplerPadding;
namespace {
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_2d_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
using opmath_t = at::opmath_type<scalar_t>;
index_t C = input.sizes[1];
index_t inp_H = input.sizes[2];
index_t inp_W = input.sizes[3];
index_t out_H = grid.sizes[1];
index_t out_W = grid.sizes[2];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sH = input.strides[2];
index_t inp_sW = input.strides[3];
index_t grid_sN = grid.strides[0];
index_t grid_sH = grid.strides[1];
index_t grid_sW = grid.strides[2];
index_t grid_sCoor = grid.strides[3];
index_t out_sN = output.strides[0];
index_t out_sC = output.strides[1];
index_t out_sH = output.strides[2];
index_t out_sW = output.strides[3];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t n = index / (out_H * out_W);
const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
opmath_t x = grid.data[grid_offset];
opmath_t y = grid.data[grid_offset + grid_sCoor];
opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners);
opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
index_t ix_nw = static_cast<index_t>(::floor(ix));
index_t iy_nw = static_cast<index_t>(::floor(iy));
index_t ix_ne = ix_nw + 1;
index_t iy_ne = iy_nw;
index_t ix_sw = ix_nw;
index_t iy_sw = iy_nw + 1;
index_t ix_se = ix_nw + 1;
index_t iy_se = iy_nw + 1;
// get surfaces to each neighbor:
opmath_t nw = (ix_se - ix) * (iy_se - iy);
opmath_t ne = (ix - ix_sw) * (iy_sw - iy);
opmath_t sw = (ix_ne - ix) * (iy - iy_ne);
opmath_t se = (ix - ix_nw) * (iy - iy_nw);
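      // Each weight is the area of the cell rectangle opposite its corner, so nw + ne + sw + se == 1
      // whenever (ix, iy) lies inside the sampled cell; out-of-bounds corners are simply skipped below.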
// calculate bilinear weighted pixel value and set output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
opmath_t out_acc = 0;
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se;
}
*out_ptr_NCHW = out_acc;
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
index_t ix_nearest = static_cast<index_t>(std::nearbyint(ix));
index_t iy_nearest = static_cast<index_t>(std::nearbyint(iy));
      // assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) {
*out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCHW = static_cast<scalar_t>(0);
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Bicubic) {
ix = grid_sampler_unnormalize(x, inp_W, align_corners);
iy = grid_sampler_unnormalize(y, inp_H, align_corners);
opmath_t ix_nw = std::floor(ix);
opmath_t iy_nw = std::floor(iy);
const opmath_t tx = ix - ix_nw;
const opmath_t ty = iy - iy_nw;
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
opmath_t coefficients[4];
#pragma unroll 4
for (index_t i = 0; i < 4; ++i) {
coefficients[i] = cubic_interp1d(
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
tx);
}
*out_ptr_NCHW = cubic_interp1d(
coefficients[0],
coefficients[1],
coefficients[2],
coefficients[3],
ty);
}
}
}
}
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(512)
__global__ void grid_sampler_3d_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
using opmath_t = at::opmath_type<scalar_t>;
index_t C = input.sizes[1];
index_t inp_D = input.sizes[2];
index_t inp_H = input.sizes[3];
index_t inp_W = input.sizes[4];
index_t out_D = grid.sizes[1];
index_t out_H = grid.sizes[2];
index_t out_W = grid.sizes[3];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sD = input.strides[2];
index_t inp_sH = input.strides[3];
index_t inp_sW = input.strides[4];
index_t grid_sN = grid.strides[0];
index_t grid_sD = grid.strides[1];
index_t grid_sH = grid.strides[2];
index_t grid_sW = grid.strides[3];
index_t grid_sCoor = grid.strides[4];
index_t out_sN = output.strides[0];
index_t out_sC = output.strides[1];
index_t out_sD = output.strides[2];
index_t out_sH = output.strides[3];
index_t out_sW = output.strides[4];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t d = (index / (out_H * out_W)) % out_D;
const index_t n = index / (out_D * out_H * out_W);
const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
opmath_t x = grid.data[grid_offset];
opmath_t y = grid.data[grid_offset + grid_sCoor];
opmath_t z = grid.data[grid_offset + 2 * grid_sCoor];
opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners);
opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners);
opmath_t iz = grid_sampler_compute_source_index(z, inp_D, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we used north-east-south-west
// for 5d, we add top-bottom
index_t ix_tnw = static_cast<index_t>(::floor(ix));
index_t iy_tnw = static_cast<index_t>(::floor(iy));
index_t iz_tnw = static_cast<index_t>(::floor(iz));
index_t ix_tne = ix_tnw + 1;
index_t iy_tne = iy_tnw;
index_t iz_tne = iz_tnw;
index_t ix_tsw = ix_tnw;
index_t iy_tsw = iy_tnw + 1;
index_t iz_tsw = iz_tnw;
index_t ix_tse = ix_tnw + 1;
index_t iy_tse = iy_tnw + 1;
index_t iz_tse = iz_tnw;
index_t ix_bnw = ix_tnw;
index_t iy_bnw = iy_tnw;
index_t iz_bnw = iz_tnw + 1;
index_t ix_bne = ix_tnw + 1;
index_t iy_bne = iy_tnw;
index_t iz_bne = iz_tnw + 1;
index_t ix_bsw = ix_tnw;
index_t iy_bsw = iy_tnw + 1;
index_t iz_bsw = iz_tnw + 1;
index_t ix_bse = ix_tnw + 1;
index_t iy_bse = iy_tnw + 1;
index_t iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
opmath_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
opmath_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
opmath_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
opmath_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
opmath_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
opmath_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
opmath_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
opmath_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
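      // Trilinear weights: each corner weight is the volume of the sub-box opposite that corner,
      // and the eight weights sum to 1 when (ix, iy, iz) lies inside the sampled cell.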
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
// (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne
// + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse
// + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne
// + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse
opmath_t out_acc = 0;
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
out_acc += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse;
}
*out_ptr_NCDHW = out_acc;
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
index_t ix_nearest = static_cast<index_t>(std::round(ix));
index_t iy_nearest = static_cast<index_t>(std::round(iy));
index_t iz_nearest = static_cast<index_t>(std::round(iz));
      // assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCDHW = static_cast<scalar_t>(0);
}
}
}
}
}
// Note [Passing pointer and offset to fastAtomicAdd]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// For its internal bounds checking, fastAtomicAdd needs to know where the destination address
// lies relative to the entire tensor, so we pass the base grad_input.data and full offset information,
// including batch * channel offset (NC_offset).
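// In other words, the per-call offset is effectively NC_offset plus the spatial offset (e.g. h * gInp_sH + w * gInp_sW),
// and grad_input_memory_span (grad_input.numel() in the launchers below) gives fastAtomicAdd the valid range to check against.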
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_2d_backward_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> grad_output,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false)
TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners,
const index_t grad_input_memory_span,
const bool input_requires_grad) {
index_t C = input.sizes[1];
index_t inp_H = input.sizes[2];
index_t inp_W = input.sizes[3];
index_t out_H = grid.sizes[1];
index_t out_W = grid.sizes[2];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sH = input.strides[2];
index_t inp_sW = input.strides[3];
index_t grid_sN = grid.strides[0];
index_t grid_sH = grid.strides[1];
index_t grid_sW = grid.strides[2];
index_t grid_sCoor = grid.strides[3];
index_t gOut_sN = grad_output.strides[0];
index_t gOut_sC = grad_output.strides[1];
index_t gOut_sH = grad_output.strides[2];
index_t gOut_sW = grad_output.strides[3];
// gInp_* (and NC_offset below) are not really needed if input_requires_grad is false.
  // Initialize to 0 so the unconditional NC_offset computations below never read an indeterminate
  // value when input_requires_grad is false (matching the 3d backward kernel).
  index_t gInp_sN = 0;
  index_t gInp_sC = 0;
  index_t gInp_sH = 0;
  index_t gInp_sW = 0;
if (input_requires_grad) {
gInp_sN = grad_input.strides[0];
gInp_sC = grad_input.strides[1];
gInp_sH = grad_input.strides[2];
gInp_sW = grad_input.strides[3];
}
index_t gGrid_sW = grad_grid.strides[2];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t n = index / (out_H * out_W);
const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t x = grid.data[grid_offset];
scalar_t y = grid.data[grid_offset + grid_sCoor];
// multipliers for gradients on ix and iy
scalar_t gix_mult, giy_mult;
scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult);
scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
index_t ix_nw = static_cast<index_t>(std::floor(ix));
index_t iy_nw = static_cast<index_t>(std::floor(iy));
index_t ix_ne = ix_nw + 1;
index_t iy_ne = iy_nw;
index_t ix_sw = ix_nw;
index_t iy_sw = iy_nw + 1;
index_t ix_se = ix_nw + 1;
index_t iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
if (input_requires_grad) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span);
safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span);
safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span);
safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span);
}
// calculate grad_grid
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW];
gix -= nw_val * (iy_se - iy) * gOut;
giy -= nw_val * (ix_se - ix) * gOut;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW];
gix += ne_val * (iy_sw - iy) * gOut;
giy -= ne_val * (ix - ix_sw) * gOut;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW];
gix -= sw_val * (iy - iy_ne) * gOut;
giy += sw_val * (ix_ne - ix) * gOut;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW];
gix += se_val * (iy - iy_nw) * gOut;
giy += se_val * (ix - ix_nw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
if (input_requires_grad) {
index_t ix_nearest = static_cast<index_t>(std::round(ix));
index_t iy_nearest = static_cast<index_t>(std::round(iy));
        // assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span);
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NHW[1] = static_cast<scalar_t>(0);
} else if (interpolation_mode == GridSamplerInterpolation::Bicubic) {
ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult);
iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult);
scalar_t ix_nw = std::floor(ix);
scalar_t iy_nw = std::floor(iy);
const scalar_t tx = ix - ix_nw;
const scalar_t ty = iy - iy_nw;
scalar_t x_coeffs[4];
scalar_t y_coeffs[4];
scalar_t x_coeffs_grad[4];
scalar_t y_coeffs_grad[4];
get_cubic_upsampling_coefficients<scalar_t>(x_coeffs, tx);
get_cubic_upsampling_coefficients<scalar_t>(y_coeffs, ty);
get_cubic_coefficients_grad<scalar_t>(x_coeffs_grad, tx);
get_cubic_coefficients_grad<scalar_t>(y_coeffs_grad, ty);
scalar_t gix = static_cast<scalar_t>(0);
scalar_t giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
#pragma unroll 4
for (index_t i = 0; i < 4; ++i) {
#pragma unroll 4
for (index_t j = 0; j < 4; ++j) {
if (input_requires_grad) {
// set input gradient. See Note [Passing pointer and offset to fastAtomicAdd].
add_value_bounded<scalar_t>(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH,
gOut * x_coeffs[i] * y_coeffs[j],
padding_mode,
align_corners,
NC_offset,
grad_input_memory_span);
}
// set grid gradient
scalar_t val = get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j,
inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners);
gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut;
giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut;
}
}
}
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
}
}
}
template <typename scalar_t, typename index_t>
C10_LAUNCH_BOUNDS_1(256)
__global__ void grid_sampler_3d_backward_kernel(
const index_t nthreads,
TensorInfo<scalar_t, index_t> grad_output,
TensorInfo<scalar_t, index_t> input,
TensorInfo<scalar_t, index_t> grid,
TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false)
TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners,
const index_t grad_input_memory_span,
const bool input_requires_grad) {
index_t C = input.sizes[1];
index_t inp_D = input.sizes[2];
index_t inp_H = input.sizes[3];
index_t inp_W = input.sizes[4];
index_t out_D = grid.sizes[1];
index_t out_H = grid.sizes[2];
index_t out_W = grid.sizes[3];
index_t inp_sN = input.strides[0];
index_t inp_sC = input.strides[1];
index_t inp_sD = input.strides[2];
index_t inp_sH = input.strides[3];
index_t inp_sW = input.strides[4];
index_t grid_sN = grid.strides[0];
index_t grid_sD = grid.strides[1];
index_t grid_sH = grid.strides[2];
index_t grid_sW = grid.strides[3];
index_t grid_sCoor = grid.strides[4];
index_t gOut_sN = grad_output.strides[0];
index_t gOut_sC = grad_output.strides[1];
index_t gOut_sD = grad_output.strides[2];
index_t gOut_sH = grad_output.strides[3];
index_t gOut_sW = grad_output.strides[4];
// gInp_* (and NC_offset below) are not really needed if input_requires_grad is false.
int64_t gInp_sN = 0;
int64_t gInp_sC = 0;
int64_t gInp_sD = 0;
int64_t gInp_sH = 0;
int64_t gInp_sW = 0;
if (input_requires_grad) {
gInp_sN = grad_input.strides[0];
gInp_sC = grad_input.strides[1];
gInp_sD = grad_input.strides[2];
gInp_sH = grad_input.strides[3];
gInp_sW = grad_input.strides[4];
}
index_t gGrid_sW = grad_grid.strides[3];
CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) {
const index_t w = index % out_W;
const index_t h = (index / out_W) % out_H;
const index_t d = (index / (out_H * out_W)) % out_D;
const index_t n = index / (out_D * out_H * out_W);
const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
// multipliers for gradients on ix, iy, and iz
scalar_t gix_mult, giy_mult, giz_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we used north-east-south-west
// for 5d, we add top-bottom
index_t ix_tnw = static_cast<index_t>(std::floor(ix));
index_t iy_tnw = static_cast<index_t>(std::floor(iy));
index_t iz_tnw = static_cast<index_t>(std::floor(iz));
index_t ix_tne = ix_tnw + 1;
index_t iy_tne = iy_tnw;
index_t iz_tne = iz_tnw;
index_t ix_tsw = ix_tnw;
index_t iy_tsw = iy_tnw + 1;
index_t iz_tsw = iz_tnw;
index_t ix_tse = ix_tnw + 1;
index_t iy_tse = iy_tnw + 1;
index_t iz_tse = iz_tnw;
index_t ix_bnw = ix_tnw;
index_t iy_bnw = iy_tnw;
index_t iz_bnw = iz_tnw + 1;
index_t ix_bne = ix_tnw + 1;
index_t iy_bne = iy_tnw;
index_t iz_bne = iz_tnw + 1;
index_t ix_bsw = ix_tnw;
index_t iy_bsw = iy_tnw + 1;
index_t iz_bsw = iz_tnw + 1;
index_t ix_bse = ix_tnw + 1;
index_t iy_bse = iy_tnw + 1;
index_t iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
index_t NC_offset;
if (input_requires_grad) {
NC_offset = n * gInp_sN;
}
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// calculate bilinear weighted pixel value and set output pixel
for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
if (input_requires_grad) {
safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut,
NC_offset, grad_input_memory_span);
safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut,
NC_offset, grad_input_memory_span);
}
// calculate grad_grid
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW];
gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW];
gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW];
gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW];
gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW];
gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW];
gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW];
gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW];
gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = gix_mult * gix;
gGrid_ptr_NDHW[1] = giy_mult * giy;
gGrid_ptr_NDHW[2] = giz_mult * giz;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
if (input_requires_grad) {
auto ix_nearest = static_cast<index_t>(std::round(ix));
auto iy_nearest = static_cast<index_t>(std::round(iy));
auto iz_nearest = static_cast<index_t>(std::round(iz));
        // assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
index_t NC_offset = n * gInp_sN;
for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) {
// calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd].
safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest,
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW,
NC_offset, grad_input_memory_span);
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0);
}
}
}
} // namespace
void launch_grid_sampler_2d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_2d(input, grid);
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
int64_t count = N * H * W;
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(output)) {
grid_sampler_2d_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
static_cast<int>(count),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
grid_sampler_2d_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
getTensorInfo<scalar_t, int64_t>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
}
void launch_grid_sampler_3d_forward_kernel(
const TensorBase &output, const TensorBase &input, const TensorBase &grid,
int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_3d(input, grid, interpolation_mode);
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
int64_t count = N * D * H * W;
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(output)) {
grid_sampler_3d_kernel<scalar_t>
<<<GET_BLOCKS(count, 512), 512, 0, at::cuda::getCurrentCUDAStream()>>>(
static_cast<int>(count),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
grid_sampler_3d_kernel<scalar_t>
<<<GET_BLOCKS(count, 512), 512, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
getTensorInfo<scalar_t, int64_t>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
}
void launch_grid_sampler_2d_backward_kernel(
const TensorBase &grad_input, const TensorBase &grad_grid,
const TensorBase &grad_output, const TensorBase &input,
const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool,2> output_mask) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_2d(input, grid);
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda");
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
// If `input` gradient is not required, we skip computing it -- not needing to create
// the tensor to hold the gradient can markedly increase performance. (`grid` gradient
// is always computed.)
auto input_requires_grad = output_mask[0];
int64_t count = N * H * W;
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(grad_output)) {
grid_sampler_2d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
static_cast<int>(count),
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0,
input_requires_grad);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
grid_sampler_2d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int64_t>(grad_output),
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(),
getTensorInfo<scalar_t, int64_t>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0,
input_requires_grad);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
}
void launch_grid_sampler_3d_backward_kernel(
const TensorBase &grad_input, const TensorBase &grad_grid,
const TensorBase& grad_output, const TensorBase& input,
const TensorBase& grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners, std::array<bool,2> output_mask) {
// See NOTE [ grid_sampler Native Functions ].
// Add checks here in case this is called instead of grid_sampler.
check_grid_sampler_common(input, grid);
check_grid_sampler_3d(input, grid, interpolation_mode);
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda");
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
int64_t count = N * D * H * W;
auto input_requires_grad = output_mask[0];
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] {
if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) &&
canUse32BitIndexMath(grad_output)) {
grid_sampler_3d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
static_cast<int>(count),
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0,
input_requires_grad);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
grid_sampler_3d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int64_t>(grad_output),
getTensorInfo<scalar_t, int64_t>(input),
getTensorInfo<scalar_t, int64_t>(grid),
input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(),
getTensorInfo<scalar_t, int64_t>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners,
/*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0,
input_requires_grad);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
}
} // namespace at::native
|
bc610903045f0ba0143551eb694f26510c60aa5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
//https://proofwiki.org/wiki/Product_of_Triangular_Matrices
int max_per_row = 0;
__global__
void devTrianglesCount(int* col_indx, int* csr_rows, int nnz, int rows, int* out_sum, int group_rows);
/**
* Description: Reads the data from the mtx files.
* The first row contains 3 integers: rows columns of the sparse graph
* and the number of non zero elements. The non zero elements are stored in
 * COO format. Also the data have one-based indexing. While reading them we transform them
* to zero based indexing.
*
* @param data char[] the name of the file to read
* @param row_indx int* where the rows of the nnz are stored
* @param col_indx int* where the columns of the nnz are stored
* @param nnz int* the number of non zero elements
* @param rows int* the number of rows
 * @param cols int* the number of columns
*/
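/* Illustrative example (values made up, not from a real dataset): an input file containing
 *   3 3 2
 *   1 2
 *   2 3
 * is read as nnz = 2 entries of a 3x3 graph; after the one-based -> zero-based shift
 * the arrays become col_indx = {0, 1} and row_indx = {1, 2}.
 */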
void readData(char data[], int **row_indx, int **col_indx, int* nnz, int * rows, int* cols){
FILE *f = fopen(data,"r");
fscanf(f, "%d %d %d\n",rows, cols, nnz);
printf("-READ %d %d %d\n",*rows,*cols,*nnz);
col_indx[0] = (int*)malloc((*nnz)*sizeof(int));
row_indx[0] = (int*)malloc((*nnz)*sizeof(int));
for(int i = 0; i < *nnz; i++){
fscanf(f, "%d %d", &col_indx[0][i] , &row_indx[0][i]);
// data have 1 base index
// transform to 0-based index
col_indx[0][i]--;
row_indx[0][i]--;
}
fclose(f);
}
/**
 * Description: Returns an array with the non zero rows in compressed format (length rows instead of nnz).
 * Combined with the column index we have the CSR representation of the sparse graph. Also finds the max non zero
* elements per row and updates the global variable max_per_row
*
* @param rows int
* @param nnz int
* @param row_indx int* the row vector from the COO format.
*
*
* Returns:
* csr_rows int*
*/
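/* Illustrative example (made-up values): with rows = 3, nnz = 4 and row_indx = {0, 0, 2, 1},
 * the per-row counts are {2, 1, 1}; the prefix sum yields csr_rows = {0, 2, 3}, i.e. row i
 * owns col_indx[csr_rows[i] .. csr_rows[i+1]-1] (the last row runs up to nnz - 1),
 * and max_per_row is updated to 2.
 */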
int* COOtoCSR(int rows, int nnz, int* row_indx){
// initialize
int* csr_rows = (int*)malloc(rows*sizeof(int));
for(int i = 0; i < rows; i++){
csr_rows[i] = 0;
}
// Transformation to CSR
for(int i = 0; i < nnz; i++){
int index = row_indx[i]+1;
if(index < rows)
csr_rows[index]++;
}
for(int i = 1; i < rows; i++){
if(csr_rows[i] > max_per_row){
max_per_row = csr_rows[i];
}
csr_rows[i] += csr_rows[i-1];
}
return csr_rows;
}
void printTime(struct timeval start, struct timeval end, char* str){
unsigned long ss,es,su,eu,s,u;
ss =start.tv_sec;
su = start.tv_usec;
es = end.tv_sec;
eu = end.tv_usec;
s = es - ss;
if(eu > su){
u = eu - su;
}else{
s--;
u = 1000000 + eu - su;
}
printf("%s,%lu,%lu\n",str,s,u);
}
int main(int argc, char** argv){
if(argc != 2){
printf("Invalid arguments\n");
return 1;
}
//hipDeviceReset();
struct timeval start,end,ALLSTART,ALLEND;
// "auto.mtx"; // "data.csv"; // "great-britain_osm.mtx"; // "delaunay_n22.mtx"; //
printf("-Dataset: %s\n",argv[1]);
int rows,cols,nnz;
int *col_indx, *row_indx;
int sum;
/* Read Data in COO format and transform to 0 based index */
gettimeofday(&start,NULL);
readData(argv[1],&row_indx,&col_indx,&nnz,&rows,&cols);
gettimeofday(&end,NULL);
printTime(start,end, "Read Data");
// Transform to CSR
gettimeofday(&start,NULL);
int* csr_rows = COOtoCSR(rows, nnz, row_indx);
// We no longer need row_indx since we have csr_rows
free(row_indx);
gettimeofday(&end,NULL);
printTime(start,end, "CSR");
printf("-MAX PER ROW = %d\n",max_per_row);
gettimeofday(&start,NULL);
hipError_t cuer;
int *cu_col_indx, *cu_csr_rows;
int* cu_sum;
cuer = hipMalloc(&cu_col_indx,nnz*sizeof(int));
printf("-%s\n",hipGetErrorName(cuer));
cuer = hipMalloc(&cu_csr_rows,rows*sizeof(int));
printf("-%s\n",hipGetErrorName(cuer));
cuer = hipMalloc(&cu_sum,rows*sizeof(int));
printf("-%s\n",hipGetErrorName(cuer));
cuer = hipMemcpy(cu_col_indx,col_indx,nnz*sizeof(int),hipMemcpyHostToDevice);
printf("-%s\n",hipGetErrorName(cuer));
cuer = hipMemcpy(cu_csr_rows,csr_rows,rows*sizeof(int),hipMemcpyHostToDevice);
printf("-%s\n",hipGetErrorName(cuer));
int* res = (int*)malloc(rows*sizeof(int));
for(int i = 0; i < rows; i++){
res[i] = 0;
}
hipMemcpy(cu_sum,res,rows*sizeof(int),hipMemcpyHostToDevice);
gettimeofday(&end,NULL);
printTime(start,end, "CUDA data transfer");
gettimeofday(&start,NULL);
//rows = 100;
int threads = max_per_row;
if(max_per_row > 64){
return 1;
}
int group_rows = 64/threads;
if(group_rows > 8){
group_rows = 8;
}
threads = threads * group_rows;
int blocksize = (1 + rows/group_rows)/(512*512) + 1;
printf("-blocksize %d %d\n", blocksize, 512*512);
printf("Group number: %d\n",group_rows);
printf("Threads = MaxNNZ*group_rows: %d %d %d \n",threads,max_per_row,group_rows);
printf("Row span = %d * %d = %d | actual rows %d\n",blocksize*512*512, group_rows, blocksize*group_rows*512*512,rows);
hipLaunchKernelGGL(( devTrianglesCount), dim3(dim3(512,512,blocksize)),dim3(threads), 0, 0, cu_col_indx, cu_csr_rows, nnz, rows, cu_sum, group_rows);
printf("-%s\n",hipGetErrorName(cuer));
cuer = hipMemcpy(res,cu_sum,rows*sizeof(int),hipMemcpyDeviceToHost);
printf("-%s\n",hipGetErrorName(cuer));
sum = 0;
for(int i = 0; i < rows; i++){
if(res[i] > 0)
sum += res[i];
}
printf("-Cuda triangles = %d\n",sum);
gettimeofday(&end,NULL);
printTime(start,end,"CUDA");
}
__global__
void devTrianglesCount(int* col_indx, int* csr_rows, int nnz, int num_of_rows, int* out_sum, int group_rows){
int row = blockIdx.x*gridDim.y*gridDim.z + blockIdx.y*gridDim.z + blockIdx.z;
int id = threadIdx.x;
int own_group = -1;
int group_offset = 0;
if(row*group_rows >= num_of_rows){
return;
}
//max group number = 16
__shared__ int sh_group_rows;
__shared__ int start_row[16];
__shared__ int end_row[16];
__shared__ int len[16];
__shared__ int* row_ptr;
__shared__ int current_row[64];
__shared__ int sh_len[64];
__shared__ int* sh_ptr[64];
__shared__ int sh_cols[64][64];
__shared__ int sh_sum[64];
	sh_sum[id] = 0;
	// start_row/end_row only have 16 entries; guard against out-of-bounds writes from higher thread ids
	if(id < 16){
		end_row[id] = 0;
		start_row[id] = 0;
	}
__syncthreads();
// Get the current rows
sh_len[id] = 0;
if(id == 0){
sh_group_rows = group_rows-1;
for(int i = 0; i < group_rows; i++){
int t_row = row*group_rows + i; // temp row
start_row[i] = csr_rows[t_row];
if(t_row == num_of_rows - 1){
sh_group_rows = i-1;
end_row[i] = nnz;
}else{
end_row[i] = csr_rows[t_row+1];
}
len[i] = end_row[i] - start_row[i];
}
row_ptr = &col_indx[start_row[0]];
}
__syncthreads();
// if(id == 0){
// start_row = csr_rows[row];
// if(row == num_of_rows-1){
// end_row = nnz;
// }else{
// end_row = csr_rows[row+1];
// }
// len = end_row - start_row;
// row_ptr = &col_indx[start_row];
// }
// __syncthreads();
if(id < end_row[sh_group_rows] - start_row[0]){
current_row[id] = row_ptr[id];
}
__syncthreads();
// Assign each thread to a group
for(int i = 0; i < sh_group_rows+1; i++){
//printf("len %d \n",end_row[i] - start_row[0]);
if(id < end_row[i] - start_row[0]){
own_group = i;
group_offset = 0;
if(i > 0){
group_offset = end_row[i-1]-start_row[0];
}
break;
}
}
__syncthreads();
if(row < 50){
// printf("id %d group offset %d \n",id,group_offset);
}
// Get info for each column
if(own_group >= 0){
int tmp_col = current_row[id];
//printf("ID %d, group %d %d, row %d, len %d , own %d END %d START %d\n", id,sh_group_rows, group_offset, row,len[0], own_group,tmp_col,1);
int tmp_start = csr_rows[tmp_col];
int tmp_end;
if(tmp_col == num_of_rows-1){
tmp_end = nnz;
}else{
tmp_end = csr_rows[tmp_col+1];
}
sh_len[id] = tmp_end - tmp_start;
sh_ptr[id] = &col_indx[tmp_start];
}
__syncthreads();
for(int i = 0; i < end_row[sh_group_rows]-start_row[0]; i++){
if(id < sh_len[i]){
sh_cols[i][id] = sh_ptr[i][id];
}
}
__syncthreads();
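	// Branch-free intersection of two sorted adjacency lists: thread `id` walks its group's row
	// (current_row starting at group_offset) against the neighbour list it owns in sh_cols[id];
	// b1 marks a common neighbour (a triangle), while b2/b3 advance whichever list is behind.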
if(own_group >= 0){
int a = 0;
int b = 0;
int sum = 0;
while(1){
if(a == len[own_group] || b == sh_len[id]){
break;
}
int b1 = current_row[a + group_offset] == sh_cols[id][b];
int b2 = current_row[a + group_offset] > sh_cols[id][b];
int b3 = current_row[a + group_offset] < sh_cols[id][b];
a = a + b1 + b3;
b = b + b1 + b2;
sum = sum + b1;
}
sh_sum[id] = sum;
}
__syncthreads();
if(id == 0){
int sum = 0;
for(int i = 0; i < end_row[sh_group_rows]-start_row[0]; i++){
sum += sh_sum[i];
}
out_sum[row] = sum;
}
__syncthreads();
}
|
bc610903045f0ba0143551eb694f26510c60aa5e.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
//https://proofwiki.org/wiki/Product_of_Triangular_Matrices
int max_per_row = 0;
__global__
void devTrianglesCount(int* col_indx, int* csr_rows, int nnz, int rows, int* out_sum, int group_rows);
/**
* Description: Reads the data from the mtx files.
* The first row contains 3 integers: rows columns of the sparse graph
* and the number of non zero elements. The non zero elements are stored in
 * COO format. Also the data have one-based indexing. While reading them we transform them
* to zero based indexing.
*
* @param data char[] the name of the file to read
* @param row_indx int* where the rows of the nnz are stored
* @param col_indx int* where the columns of the nnz are stored
* @param nnz int* the number of non zero elements
* @param rows int* the number of rows
 * @param cols int* the number of columns
*/
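/* Illustrative example (values made up, not from a real dataset): an input file containing
 *   3 3 2
 *   1 2
 *   2 3
 * is read as nnz = 2 entries of a 3x3 graph; after the one-based -> zero-based shift
 * the arrays become col_indx = {0, 1} and row_indx = {1, 2}.
 */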
void readData(char data[], int **row_indx, int **col_indx, int* nnz, int * rows, int* cols){
FILE *f = fopen(data,"r");
fscanf(f, "%d %d %d\n",rows, cols, nnz);
printf("-READ %d %d %d\n",*rows,*cols,*nnz);
col_indx[0] = (int*)malloc((*nnz)*sizeof(int));
row_indx[0] = (int*)malloc((*nnz)*sizeof(int));
for(int i = 0; i < *nnz; i++){
fscanf(f, "%d %d", &col_indx[0][i] , &row_indx[0][i]);
// data have 1 base index
// transform to 0-based index
col_indx[0][i]--;
row_indx[0][i]--;
}
fclose(f);
}
/**
 * Description: Returns an array with the non zero rows in compressed format (length rows instead of nnz).
 * Combined with the column index we have the CSR representation of the sparse graph. Also finds the max non zero
* elements per row and updates the global variable max_per_row
*
* @param rows int
* @param nnz int
* @param row_indx int* the row vector from the COO format.
*
*
* Returns:
* csr_rows int*
*/
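/* Illustrative example (made-up values): with rows = 3, nnz = 4 and row_indx = {0, 0, 2, 1},
 * the per-row counts are {2, 1, 1}; the prefix sum yields csr_rows = {0, 2, 3}, i.e. row i
 * owns col_indx[csr_rows[i] .. csr_rows[i+1]-1] (the last row runs up to nnz - 1),
 * and max_per_row is updated to 2.
 */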
int* COOtoCSR(int rows, int nnz, int* row_indx){
// initialize
int* csr_rows = (int*)malloc(rows*sizeof(int));
for(int i = 0; i < rows; i++){
csr_rows[i] = 0;
}
// Transformation to CSR
for(int i = 0; i < nnz; i++){
int index = row_indx[i]+1;
if(index < rows)
csr_rows[index]++;
}
for(int i = 1; i < rows; i++){
if(csr_rows[i] > max_per_row){
max_per_row = csr_rows[i];
}
csr_rows[i] += csr_rows[i-1];
}
return csr_rows;
}
void printTime(struct timeval start, struct timeval end, char* str){
unsigned long ss,es,su,eu,s,u;
ss =start.tv_sec;
su = start.tv_usec;
es = end.tv_sec;
eu = end.tv_usec;
s = es - ss;
if(eu > su){
u = eu - su;
}else{
s--;
u = 1000000 + eu - su;
}
printf("%s,%lu,%lu\n",str,s,u);
}
int main(int argc, char** argv){
if(argc != 2){
printf("Invalid arguments\n");
return 1;
}
//cudaDeviceReset();
struct timeval start,end,ALLSTART,ALLEND;
// "auto.mtx"; // "data.csv"; // "great-britain_osm.mtx"; // "delaunay_n22.mtx"; //
printf("-Dataset: %s\n",argv[1]);
int rows,cols,nnz;
int *col_indx, *row_indx;
int sum;
/* Read Data in COO format and transform to 0 based index */
gettimeofday(&start,NULL);
readData(argv[1],&row_indx,&col_indx,&nnz,&rows,&cols);
gettimeofday(&end,NULL);
printTime(start,end, "Read Data");
// Transform to CSR
gettimeofday(&start,NULL);
int* csr_rows = COOtoCSR(rows, nnz, row_indx);
// We no longer need row_indx since we have csr_rows
free(row_indx);
gettimeofday(&end,NULL);
printTime(start,end, "CSR");
printf("-MAX PER ROW = %d\n",max_per_row);
gettimeofday(&start,NULL);
cudaError_t cuer;
int *cu_col_indx, *cu_csr_rows;
int* cu_sum;
cuer = cudaMalloc(&cu_col_indx,nnz*sizeof(int));
printf("-%s\n",cudaGetErrorName(cuer));
cuer = cudaMalloc(&cu_csr_rows,rows*sizeof(int));
printf("-%s\n",cudaGetErrorName(cuer));
cuer = cudaMalloc(&cu_sum,rows*sizeof(int));
printf("-%s\n",cudaGetErrorName(cuer));
cuer = cudaMemcpy(cu_col_indx,col_indx,nnz*sizeof(int),cudaMemcpyHostToDevice);
printf("-%s\n",cudaGetErrorName(cuer));
cuer = cudaMemcpy(cu_csr_rows,csr_rows,rows*sizeof(int),cudaMemcpyHostToDevice);
printf("-%s\n",cudaGetErrorName(cuer));
int* res = (int*)malloc(rows*sizeof(int));
for(int i = 0; i < rows; i++){
res[i] = 0;
}
cudaMemcpy(cu_sum,res,rows*sizeof(int),cudaMemcpyHostToDevice);
gettimeofday(&end,NULL);
printTime(start,end, "CUDA data transfer");
gettimeofday(&start,NULL);
//rows = 100;
int threads = max_per_row;
if(max_per_row > 64){
return 1;
}
int group_rows = 64/threads;
if(group_rows > 8){
group_rows = 8;
}
threads = threads * group_rows;
int blocksize = (1 + rows/group_rows)/(512*512) + 1;
printf("-blocksize %d %d\n", blocksize, 512*512);
printf("Group number: %d\n",group_rows);
printf("Threads = MaxNNZ*group_rows: %d %d %d \n",threads,max_per_row,group_rows);
printf("Row span = %d * %d = %d | actual rows %d\n",blocksize*512*512, group_rows, blocksize*group_rows*512*512,rows);
devTrianglesCount<<<dim3(512,512,blocksize),threads>>>(cu_col_indx, cu_csr_rows, nnz, rows, cu_sum, group_rows);
printf("-%s\n",cudaGetErrorName(cuer));
cuer = cudaMemcpy(res,cu_sum,rows*sizeof(int),cudaMemcpyDeviceToHost);
printf("-%s\n",cudaGetErrorName(cuer));
sum = 0;
for(int i = 0; i < rows; i++){
if(res[i] > 0)
sum += res[i];
}
printf("-Cuda triangles = %d\n",sum);
gettimeofday(&end,NULL);
printTime(start,end,"CUDA");
}
__global__
void devTrianglesCount(int* col_indx, int* csr_rows, int nnz, int num_of_rows, int* out_sum, int group_rows){
int row = blockIdx.x*gridDim.y*gridDim.z + blockIdx.y*gridDim.z + blockIdx.z;
int id = threadIdx.x;
int own_group = -1;
int group_offset = 0;
if(row*group_rows >= num_of_rows){
return;
}
//max group number = 16
__shared__ int sh_group_rows;
__shared__ int start_row[16];
__shared__ int end_row[16];
__shared__ int len[16];
__shared__ int* row_ptr;
__shared__ int current_row[64];
__shared__ int sh_len[64];
__shared__ int* sh_ptr[64];
__shared__ int sh_cols[64][64];
__shared__ int sh_sum[64];
	sh_sum[id] = 0;
	// start_row/end_row only have 16 entries; guard against out-of-bounds writes from higher thread ids
	if(id < 16){
		end_row[id] = 0;
		start_row[id] = 0;
	}
__syncthreads();
// Get the current rows
sh_len[id] = 0;
if(id == 0){
sh_group_rows = group_rows-1;
for(int i = 0; i < group_rows; i++){
int t_row = row*group_rows + i; // temp row
start_row[i] = csr_rows[t_row];
if(t_row == num_of_rows - 1){
sh_group_rows = i-1;
end_row[i] = nnz;
}else{
end_row[i] = csr_rows[t_row+1];
}
len[i] = end_row[i] - start_row[i];
}
row_ptr = &col_indx[start_row[0]];
}
__syncthreads();
// if(id == 0){
// start_row = csr_rows[row];
// if(row == num_of_rows-1){
// end_row = nnz;
// }else{
// end_row = csr_rows[row+1];
// }
// len = end_row - start_row;
// row_ptr = &col_indx[start_row];
// }
// __syncthreads();
if(id < end_row[sh_group_rows] - start_row[0]){
current_row[id] = row_ptr[id];
}
__syncthreads();
// Assign each thread to a group
for(int i = 0; i < sh_group_rows+1; i++){
//printf("len %d \n",end_row[i] - start_row[0]);
if(id < end_row[i] - start_row[0]){
own_group = i;
group_offset = 0;
if(i > 0){
group_offset = end_row[i-1]-start_row[0];
}
break;
}
}
__syncthreads();
if(row < 50){
// printf("id %d group offset %d \n",id,group_offset);
}
// Get info for each column
if(own_group >= 0){
int tmp_col = current_row[id];
//printf("ID %d, group %d %d, row %d, len %d , own %d END %d START %d\n", id,sh_group_rows, group_offset, row,len[0], own_group,tmp_col,1);
int tmp_start = csr_rows[tmp_col];
int tmp_end;
if(tmp_col == num_of_rows-1){
tmp_end = nnz;
}else{
tmp_end = csr_rows[tmp_col+1];
}
sh_len[id] = tmp_end - tmp_start;
sh_ptr[id] = &col_indx[tmp_start];
}
__syncthreads();
for(int i = 0; i < end_row[sh_group_rows]-start_row[0]; i++){
if(id < sh_len[i]){
sh_cols[i][id] = sh_ptr[i][id];
}
}
__syncthreads();
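	// Branch-free intersection of two sorted adjacency lists: thread `id` walks its group's row
	// (current_row starting at group_offset) against the neighbour list it owns in sh_cols[id];
	// b1 marks a common neighbour (a triangle), while b2/b3 advance whichever list is behind.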
if(own_group >= 0){
int a = 0;
int b = 0;
int sum = 0;
while(1){
if(a == len[own_group] || b == sh_len[id]){
break;
}
int b1 = current_row[a + group_offset] == sh_cols[id][b];
int b2 = current_row[a + group_offset] > sh_cols[id][b];
int b3 = current_row[a + group_offset] < sh_cols[id][b];
a = a + b1 + b3;
b = b + b1 + b2;
sum = sum + b1;
}
sh_sum[id] = sum;
}
__syncthreads();
if(id == 0){
int sum = 0;
for(int i = 0; i < end_row[sh_group_rows]-start_row[0]; i++){
sum += sh_sum[i];
}
out_sum[row] = sum;
}
__syncthreads();
}
|
8ca31ddd4ef99ceb21692308134260f9143daf29.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <chrono>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include <cmath>
#define BLOCK_SIZE 32
#define ITERS 100000
__global__ void calc_dist(double *X, double *Y, double *dist, int N) {
    int i = blockIdx.y*blockDim.y+threadIdx.y;
    int j = blockIdx.x*blockDim.x+threadIdx.x;
    // guard threads outside the N x N distance matrix when N is not a multiple of BLOCK_SIZE
    if (i >= N || j >= N) return;
    dist[i*N+j] = sqrt(pow((X[i] - X[j]), 2) + pow((Y[i] - Y[j]), 2));
}
__global__ void random_sol(int *solutions, double *costs, double *distances, int N, int nSols) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
double cost = 0;
for (int k = 0; k < N; k++) {
solutions[i * N + k] = k;
}
hiprandState_t st;
hiprand_init(0, i, 0, &st);
int idx;
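    // Fisher-Yates style shuffle: position k is swapped with a randomly chosen position >= k
    // (city 0 is kept as the tour start) and the tour cost is accumulated edge by edge.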
for (int k = 1; k < N; k++){
idx = (int) ((N-k) * hiprand_uniform(&st) + k);
int tmp = solutions[i * N + k];
solutions[i * N + k] = solutions[i * N + idx];
solutions[i * N + idx] = tmp;
cost += distances[solutions[i * N + k-1] * N + solutions[i * N + k]];
}
cost += distances[solutions[i * N] * N + solutions[i * N + N-1]];
costs[i] = cost;
// for (int k=0; k<i;k++){
// for (int j=0;j<i;j++){
// if costs[k]<costs[j]{
// }
// }
// }
}
int main() {
double N;
std::cin >> N;
thrust::host_vector<double> host_x(N);
thrust::host_vector<double> host_y(N);
double x, y;
for (int i = 0; i < N; i++) {
std::cin >> x;
std::cin >> y;
host_x[i] = x;
host_y[i] = y;
}
// copy the input data to the GPU
thrust::device_vector<double> dev_x(host_x);
thrust::device_vector<double> dev_y(host_y);
thrust::device_vector<double> dev_points_distance(N * N);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(ceil(N / threads.x), ceil(N / threads.y));
hipLaunchKernelGGL(( calc_dist), dim3(grid),dim3(threads), 0, 0, thrust::raw_pointer_cast(dev_x.data()), thrust::raw_pointer_cast(dev_y.data()), thrust::raw_pointer_cast(dev_points_distance.data()), N);
double nSols = 1024;
int gpu_threads = 1024;
thrust::device_vector<int> dev_solutions(nSols * N);
thrust::device_vector<double> dev_costs(nSols);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
hipLaunchKernelGGL(( random_sol), dim3(ceil((double) nSols/gpu_threads)), dim3(gpu_threads), 0, 0, thrust::raw_pointer_cast(dev_solutions.data()), thrust::raw_pointer_cast(dev_costs.data()), thrust::raw_pointer_cast(dev_points_distance.data()), N, nSols);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
thrust::device_vector<double>::iterator iter = thrust::min_element(dev_costs.begin(), dev_costs.end());
int position = iter - dev_costs.begin();
double min_val = *iter;
std::cout << min_val;
std::cout << " 0" << std::endl;
for (int i = position * N; i < position * N + N; i++) {
std::cout << dev_solutions[i] << ' ';
}
std::cout << std::endl;
return 0;
}
|
8ca31ddd4ef99ceb21692308134260f9143daf29.cu
|
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <chrono>
#include <iomanip>
#include <cuda_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/extrema.h> // thrust::min_element
#include "curand.h"
#include "curand_kernel.h"
#include <cmath>
#define BLOCK_SIZE 32
#define ITERS 100000
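// Fills dist (row-major N x N) with the pairwise Euclidean distances between the N points (X, Y).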
__global__ void calc_dist(double *X, double *Y, double *dist, int N) {
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= N || j >= N) return; // guard the partial tile when N is not a multiple of the block size
dist[i*N+j] = sqrt(pow((X[i] - X[j]), 2) + pow((Y[i] - Y[j]), 2));
}
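// Each thread builds one random tour with a Fisher-Yates shuffle (city 0 stays first)
// and accumulates the closed-tour length into costs[i].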
__global__ void random_sol(int *solutions, double *costs, double *distances, int N, int nSols) {
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i >= nSols) return; // guard against extra threads when nSols is not a multiple of the block size
double cost = 0;
for (int k = 0; k < N; k++) {
solutions[i * N + k] = k;
}
curandState_t st;
curand_init(0, i, 0, &st);
int idx;
for (int k = 1; k < N; k++){
idx = (int) ((N-k) * curand_uniform(&st) + k);
if (idx >= N) idx = N - 1; // curand_uniform can return exactly 1.0f
int tmp = solutions[i * N + k];
solutions[i * N + k] = solutions[i * N + idx];
solutions[i * N + idx] = tmp;
cost += distances[solutions[i * N + k-1] * N + solutions[i * N + k]];
}
cost += distances[solutions[i * N] * N + solutions[i * N + N-1]];
costs[i] = cost;
// for (int k=0; k<i;k++){
// for (int j=0;j<i;j++){
// if costs[k]<costs[j]{
// }
// }
// }
}
int main() {
double N;
std::cin >> N;
thrust::host_vector<double> host_x(N);
thrust::host_vector<double> host_y(N);
double x, y;
for (int i = 0; i < N; i++) {
std::cin >> x;
std::cin >> y;
host_x[i] = x;
host_y[i] = y;
}
// copy the input data to the GPU
thrust::device_vector<double> dev_x(host_x);
thrust::device_vector<double> dev_y(host_y);
thrust::device_vector<double> dev_points_distance(N * N);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(ceil(N / threads.x), ceil(N / threads.y));
calc_dist<<<grid,threads>>>(thrust::raw_pointer_cast(dev_x.data()), thrust::raw_pointer_cast(dev_y.data()), thrust::raw_pointer_cast(dev_points_distance.data()), N);
double nSols = 1024;
int gpu_threads = 1024;
thrust::device_vector<int> dev_solutions(nSols * N);
thrust::device_vector<double> dev_costs(nSols);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
random_sol<<<ceil((double) nSols/gpu_threads), gpu_threads>>>(thrust::raw_pointer_cast(dev_solutions.data()), thrust::raw_pointer_cast(dev_costs.data()), thrust::raw_pointer_cast(dev_points_distance.data()), N, nSols);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
thrust::device_vector<double>::iterator iter = thrust::min_element(dev_costs.begin(), dev_costs.end());
int position = iter - dev_costs.begin();
double min_val = *iter;
std::cout << min_val;
std::cout << " 0" << std::endl;
for (int i = position * N; i < position * N + N; i++) {
std::cout << dev_solutions[i] << ' ';
}
std::cout << std::endl;
return 0;
}
|
d4beb6172693bb05f453c18826e3f077d81d3507.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019 by Contributors
#include <gtest/gtest.h>
#include <xgboost/data.h>
#include "../../../src/data/adapter.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../src/common/timer.h"
#include "../helpers.h"
#include <thrust/device_vector.h>
#include "../../../src/data/device_adapter.cuh"
#include "test_array_interface.h"
using namespace xgboost; // NOLINT
void TestCudfAdapter()
{
constexpr size_t kRowsA {16};
constexpr size_t kRowsB {16};
std::vector<Json> columns;
thrust::device_vector<double> d_data_0(kRowsA);
thrust::device_vector<uint32_t> d_data_1(kRowsB);
columns.emplace_back(GenerateDenseColumn<double>("<f8", kRowsA, &d_data_0));
columns.emplace_back(GenerateDenseColumn<uint32_t>("<u4", kRowsB, &d_data_1));
Json column_arr {columns};
std::stringstream ss;
Json::Dump(column_arr, &ss);
std::string str = ss.str();
data::CudfAdapter adapter(str);
adapter.Next();
auto & batch = adapter.Value();
EXPECT_EQ(batch.Size(), kRowsA + kRowsB);
EXPECT_NO_THROW({
dh::LaunchN(0, batch.Size(), [=] __device__(size_t idx) {
auto element = batch.GetElement(idx);
KERNEL_CHECK(element.row_idx == idx / 2);
if (idx % 2 == 0) {
KERNEL_CHECK(element.column_idx == 0);
KERNEL_CHECK(element.value == element.row_idx * 2.0f);
} else {
KERNEL_CHECK(element.column_idx == 1);
KERNEL_CHECK(element.value == element.row_idx * 2.0f);
}
});
dh::safe_cuda(hipDeviceSynchronize());
});
}
TEST(DeviceAdapter, CudfAdapter) {
TestCudfAdapter();
}
|
d4beb6172693bb05f453c18826e3f077d81d3507.cu
|
// Copyright (c) 2019 by Contributors
#include <gtest/gtest.h>
#include <xgboost/data.h>
#include "../../../src/data/adapter.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../src/common/timer.h"
#include "../helpers.h"
#include <thrust/device_vector.h>
#include "../../../src/data/device_adapter.cuh"
#include "test_array_interface.h"
using namespace xgboost; // NOLINT
void TestCudfAdapter()
{
constexpr size_t kRowsA {16};
constexpr size_t kRowsB {16};
std::vector<Json> columns;
thrust::device_vector<double> d_data_0(kRowsA);
thrust::device_vector<uint32_t> d_data_1(kRowsB);
columns.emplace_back(GenerateDenseColumn<double>("<f8", kRowsA, &d_data_0));
columns.emplace_back(GenerateDenseColumn<uint32_t>("<u4", kRowsB, &d_data_1));
Json column_arr {columns};
std::stringstream ss;
Json::Dump(column_arr, &ss);
std::string str = ss.str();
data::CudfAdapter adapter(str);
adapter.Next();
auto & batch = adapter.Value();
EXPECT_EQ(batch.Size(), kRowsA + kRowsB);
EXPECT_NO_THROW({
dh::LaunchN(0, batch.Size(), [=] __device__(size_t idx) {
auto element = batch.GetElement(idx);
KERNEL_CHECK(element.row_idx == idx / 2);
if (idx % 2 == 0) {
KERNEL_CHECK(element.column_idx == 0);
KERNEL_CHECK(element.value == element.row_idx * 2.0f);
} else {
KERNEL_CHECK(element.column_idx == 1);
KERNEL_CHECK(element.value == element.row_idx * 2.0f);
}
});
dh::safe_cuda(cudaDeviceSynchronize());
});
}
TEST(DeviceAdapter, CudfAdapter) {
TestCudfAdapter();
}
|
2a0ebb1b36227dea28c7a6dfe93cf753329f1e2f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 19-Oct-2012 16:21:12
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0,
int *ind_map,
short *arg_map,
int *arg1,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg0_l[4];
int arg1_l[1];
for (int d=0; d<1; d++) arg1_l[d]=ZERO_int;
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ double *ind_arg0_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*1];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*4; n+=blockDim.x)
ind_arg0_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg0_l[d] = ZERO_double;
// user-supplied kernel call
res_calc( arg0_l,
arg1_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg0_map;
if (col2>=0) {
arg0_map = arg_map[0*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg0_s[d+arg0_map*4] += arg0_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg0_size*4; n+=blockDim.x)
ind_arg0[n%4+ind_arg0_map[n/4]*4] += ind_arg0_s[n];
// global reductions
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]);
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1 ){
int *arg1h = (int *)arg1.data;
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
int ninds = 1;
int inds[2] = {0,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(0);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
op_timers_core(&cpu_t1, &wall_t1);
// transfer global reduction data to GPU
int maxblocks = 0;
for (int col=0; col < Plan->ncolors; col++)
maxblocks = MAX(maxblocks,Plan->ncolblk[col]);
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
reduct_size = MAX(reduct_size,sizeof(int));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OP_reduct_h + reduct_bytes;
arg1.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((int *)arg1.data)[d+b*1] = ZERO_int;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
mvReductArraysToDevice(reduct_bytes);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = MAX(Plan->nshared,reduct_size*nthread);
hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d,
Plan->ind_map,
Plan->loc_map,
(int *)arg1.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
// transfer global reduction data back to CPU
if (col == Plan->ncolors_owned - 1)
mvReductArraysToHost(reduct_bytes);
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(0);
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg1h[d] = arg1h[d] + ((int *)arg1.data)[d+b*1];
arg1.data = (char *)arg1h;
op_mpi_reduce(&arg1,arg1h);
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
2a0ebb1b36227dea28c7a6dfe93cf753329f1e2f.cu
|
//
// auto-generated by op2.m on 19-Oct-2012 16:21:12
//
// user function
__device__
#include "res_calc.h"
// CUDA kernel function
__global__ void op_cuda_res_calc(
double *ind_arg0,
int *ind_map,
short *arg_map,
int *arg1,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
double arg0_l[4];
int arg1_l[1];
for (int d=0; d<1; d++) arg1_l[d]=ZERO_int;
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ double *ind_arg0_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return;
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*1];
ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*4; n+=blockDim.x)
ind_arg0_s[n] = ZERO_double;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg0_l[d] = ZERO_double;
// user-supplied kernel call
res_calc( arg0_l,
arg1_l );
col2 = colors[n+offset_b];
}
// store local variables
int arg0_map;
if (col2>=0) {
arg0_map = arg_map[0*set_size+n+offset_b];
}
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg0_s[d+arg0_map*4] += arg0_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg0_size*4; n+=blockDim.x)
ind_arg0[n%4+ind_arg0_map[n/4]*4] += ind_arg0_s[n];
// global reductions
for(int d=0; d<1; d++)
op_reduction<OP_INC>(&arg1[d+blockIdx.x*1],arg1_l[d]);
}
// host stub function
void op_par_loop_res_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1 ){
int *arg1h = (int *)arg1.data;
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
int ninds = 1;
int inds[2] = {0,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: res_calc\n");
}
// get plan
#ifdef OP_PART_SIZE_0
int part_size = OP_PART_SIZE_0;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges(set, nargs, args);
// initialise timers
double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0;
op_timing_realloc(0);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (set->size >0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
op_timers_core(&cpu_t1, &wall_t1);
// transfer global reduction data to GPU
int maxblocks = 0;
for (int col=0; col < Plan->ncolors; col++)
maxblocks = MAX(maxblocks,Plan->ncolblk[col]);
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
reduct_size = MAX(reduct_size,sizeof(int));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OP_reduct_h + reduct_bytes;
arg1.data_d = OP_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
((int *)arg1.data)[d+b*1] = ZERO_int;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(int));
mvReductArraysToDevice(reduct_bytes);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args);
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
int nshared = MAX(Plan->nshared,reduct_size*nthread);
op_cuda_res_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d,
Plan->ind_map,
Plan->loc_map,
(int *)arg1.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set_size);
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg("op_cuda_res_calc execution failed\n");
// transfer global reduction data back to CPU
if (col == Plan->ncolors_owned - 1)
mvReductArraysToHost(reduct_bytes);
}
block_offset += Plan->ncolblk[col];
}
op_timing_realloc(0);
OP_kernels[0].transfer += Plan->transfer;
OP_kernels[0].transfer2 += Plan->transfer2;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++)
arg1h[d] = arg1h[d] + ((int *)arg1.data)[d+b*1];
arg1.data = (char *)arg1h;
op_mpi_reduce(&arg1,arg1h);
}
op_mpi_set_dirtybit(nargs, args);
// update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
}
|
e4f391323926c5a2f57fc45766b9b96c5c37a2a2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
int out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
template<typename DType, int Req1, int Req2>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
const DType* grid_src,
DType* grad_grid) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
int grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (int c = 0; c < o_c; ++c) {
int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
}
top_left_v = *(data + data_index);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index + 1],
*(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w));
}
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index+ i_w],
*(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w);
}
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index+ i_w + 1],
*(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w));
}
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_y_w);
}
if (Req2 != mxnet::kNullOp) {
// calc grad of grid
*(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2;
*(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2;
}
}
}
} // namespace cuda
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 4, DType> &grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
hipStream_t stream = Stream<gpu>::GetStream(output.stream_);
cuda::BilinearSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
// post kernel check
hipError_t err = hipPeekAtLastError();
CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 4, DType> &ggrid,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data,
const Tensor<gpu, 4, DType> &grid,
const mxnet::OpReqType data_req,
const mxnet::OpReqType grid_req) {
using namespace mxnet;
DType *g_input = input_grad.dptr_;
DType *grad_grid = ggrid.dptr_;
const DType *grid_src = grid.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
MXNET_REQ_TYPE_SWITCH(data_req, Req1, {
MXNET_REQ_TYPE_SWITCH(grid_req, Req2, {
hipLaunchKernelGGL(( cuda::BilinearSamplerBackwardKernel<DType, Req1, Req2>)
, dim3(num_blocks), dim3(threads_per_block), 0, stream ,
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
});
});
// post kernel check
hipError_t err = hipPeekAtLastError();
CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
if (param.cudnn_off.has_value() && param.cudnn_off.value()) {
op = new BilinearSamplerOp<gpu, DType>(param);
} else {
op = new CuDNNBilinearSamplerOp<DType>(param);
}
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BilinearSamplerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
|
e4f391323926c5a2f57fc45766b9b96c5c37a2a2.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
return (value >= lowerBound && value <= upperBound);
}
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
const int i_w, const DType* data,
const DType* grid, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* out) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_c * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in out
int w = index % o_w;
int h = (index / o_w) % o_h;
int c = (index / o_w / o_h) % o_c;
int n = index / o_w / o_h / o_c;
int out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int grid_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_left_v = *(data + data_index);
if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
top_right_v = *(data + data_index + 1);
if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_left_v = *(data + data_index + i_w);
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
bottom_right_v = *(data + data_index + i_w + 1);
*(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
}
}
template<typename DType, int Req1, int Req2>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
const int i_w, const DType* grad,
const DType* data, const int o_n,
const int o_c, const int o_h,
const int o_w, DType* g_input,
const DType* grid_src,
DType* grad_grid) {
for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
index < o_n * o_h * o_w;
index += blockDim.x * gridDim.x * gridDim.y) {
// (n, c, h, w) is the element in grad
int w = index % o_w;
int h = (index / o_w) % o_h;
int n = index / o_w / o_h;
DType top_left_y_gw = 0.0;
DType top_left_x_gw = 0.0;
int grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2;
DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2;
int top_left_y = static_cast<int>(floor(y_real));
int top_left_x = static_cast<int>(floor(x_real));
DType top_left_y_w = 1.0 - (y_real - top_left_y);
DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (int c = 0; c < o_c; ++c) {
int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
// calc 4 vertex value in input data
DType top_left_v = 0;
DType top_right_v = 0;
DType bottom_left_v = 0;
DType bottom_right_v = 0;
// calc input grad
if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
}
top_left_v = *(data + data_index);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index + 1],
*(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w));
}
top_right_v = *(data + data_index + 1);
}
if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index+ i_w],
*(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w);
}
bottom_left_v = *(data + data_index + i_w);
}
if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
if (Req1 != mxnet::kNullOp) {
atomicAdd(&g_input[data_index+ i_w + 1],
*(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w));
}
bottom_right_v = *(data + data_index + i_w + 1);
}
// calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_x_w);
top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
(top_left_v - top_right_v - bottom_left_v + bottom_right_v)
* top_left_y_w);
}
if (Req2 != mxnet::kNullOp) {
// calc grad of grid
*(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h - 1) / 2;
*(grad_grid + grid_src_index) += top_left_x_gw * (i_w - 1) / 2;
}
}
}
} // namespace cuda
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
const Tensor<gpu, 4, DType> &input,
const Tensor<gpu, 4, DType> &grid_src) {
DType *out = output.dptr_;
const DType *data = input.dptr_;
const DType *grid = grid_src.dptr_;
int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
using namespace cuda;
const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
cudaStream_t stream = Stream<gpu>::GetStream(output.stream_);
cuda::BilinearSamplerForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >(
i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
// post kernel check
cudaError err = cudaPeekAtLastError();
CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
const Tensor<gpu, 4, DType> &ggrid,
const Tensor<gpu, 4, DType> &output_grad,
const Tensor<gpu, 4, DType> &input_data,
const Tensor<gpu, 4, DType> &grid,
const mxnet::OpReqType data_req,
const mxnet::OpReqType grid_req) {
using namespace mxnet;
DType *g_input = input_grad.dptr_;
DType *grad_grid = ggrid.dptr_;
const DType *grid_src = grid.dptr_;
const DType *grad = output_grad.dptr_;
const DType *data = input_data.dptr_;
int o_n = output_grad.size(0), o_c = output_grad.size(1),
o_h = output_grad.size(2), o_w = output_grad.size(3);
int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
using namespace cuda;
const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
/ kMaxThreadsPerBlock;
const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
const int grid_dim_y =
(max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
dim3 num_blocks(grid_dim_x, grid_dim_y);
dim3 threads_per_block(kMaxThreadsPerBlock);
CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
MXNET_REQ_TYPE_SWITCH(data_req, Req1, {
MXNET_REQ_TYPE_SWITCH(grid_req, Req2, {
cuda::BilinearSamplerBackwardKernel<DType, Req1, Req2>
<<<num_blocks, threads_per_block, 0, stream >>>(
i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
});
});
// post kernel check
cudaError err = cudaPeekAtLastError();
CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
Operator *op = NULL;
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
if (param.cudnn_off.has_value() && param.cudnn_off.value()) {
op = new BilinearSamplerOp<gpu, DType>(param);
} else {
op = new CuDNNBilinearSamplerOp<DType>(param);
}
})
#else
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new BilinearSamplerOp<gpu, DType>(param);
})
#endif // MXNET_USE_CUDNN && CUDNN_MAJOR
return op;
}
} // namespace op
} // namespace mxnet
|
8604b6b1a85956e9f9813e64e50572ba07c75246.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cusparseHelper.h"
#include "saiga/core/util/assert.h"
namespace Saiga
{
namespace CUDA
{
#ifdef SAIGA_USE_CUSPARSE
hipsparseHandle_t cusparseHandle = 0;
hipblasHandle_t cublashandle = 0;
void initBLASSPARSE()
{
if (!isBLASSPARSEInitialized())
{
hipblasCreate(&cublashandle);
hipsparseCreate(&cusparseHandle);
}
}
void destroyBLASSPARSE()
{
if (isBLASSPARSEInitialized())
{
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublashandle);
cusparseHandle = 0;
cublashandle = 0;
}
}
bool isBLASSPARSEInitialized()
{
return cusparseHandle != 0;
}
extern void testCuBLAS();
extern void testCuSparse();
void runBLASSPARSETests()
{
testCuBLAS();
testCuSparse();
}
#endif
} // namespace CUDA
} // namespace Saiga
|
8604b6b1a85956e9f9813e64e50572ba07c75246.cu
|
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cusparseHelper.h"
#include "saiga/core/util/assert.h"
namespace Saiga
{
namespace CUDA
{
#ifdef SAIGA_USE_CUSPARSE
cusparseHandle_t cusparseHandle = 0;
cublasHandle_t cublashandle = 0;
void initBLASSPARSE()
{
if (!isBLASSPARSEInitialized())
{
cublasCreate(&cublashandle);
cusparseCreate(&cusparseHandle);
}
}
void destroyBLASSPARSE()
{
if (isBLASSPARSEInitialized())
{
cusparseDestroy(cusparseHandle);
cublasDestroy(cublashandle);
cusparseHandle = 0;
cublashandle = 0;
}
}
bool isBLASSPARSEInitialized()
{
return cusparseHandle != 0;
}
extern void testCuBLAS();
extern void testCuSparse();
void runBLASSPARSETests()
{
testCuBLAS();
testCuSparse();
}
#endif
} // namespace CUDA
} // namespace Saiga
|
c13a03282eef64af91d9ac56ffe23951652a1fd4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "MonteRayDefinitions.hh"
#include "Material_test_helper.hh"
MaterialTestHelper::MaterialTestHelper(){}
MaterialTestHelper::~MaterialTestHelper(){}
void MaterialTestHelper::setupTimers(){
#ifdef __HIPCC__
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
}
void MaterialTestHelper::stopTimers(){
float elapsedTime = 0.0f; // avoid printing an uninitialized value when HIP is disabled
#ifdef __HIPCC__
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop );
#endif
std::cout << "MaterialTestHelper: Elapsed time in CUDA kernel=" << elapsedTime << " msec" << std::endl;
}
|
c13a03282eef64af91d9ac56ffe23951652a1fd4.cu
|
#include <iostream>
#include "MonteRayDefinitions.hh"
#include "Material_test_helper.hh"
MaterialTestHelper::MaterialTestHelper(){}
MaterialTestHelper::~MaterialTestHelper(){}
void MaterialTestHelper::setupTimers(){
#ifdef __CUDACC__
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
}
void MaterialTestHelper::stopTimers(){
float elapsedTime = 0.0f; // avoid printing an uninitialized value when CUDA is disabled
#ifdef __CUDACC__
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop );
#endif
std::cout << "MaterialTestHelper: Elapsed time in CUDA kernel=" << elapsedTime << " msec" << std::endl;
}
|
80e55552d9c794a18da875cd7b0137d1f2eb3b46.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void calculateResidual_CUDA(float *a, float *b, float *c) {
__shared__ float se[1024];
int tid=threadIdx.x;
int bid=blockIdx.x;
int n=blockDim.x;
// Calculate
se[tid]=fabsf(a[tid+bid*n]-b[tid+bid*n]);
__syncthreads();
// Reduction
int numActiveThreads=n/2;
while(numActiveThreads>0) {
if(tid<numActiveThreads) {
se[tid]=se[tid]+se[tid+numActiveThreads];
}
numActiveThreads=numActiveThreads/2;
__syncthreads();
}
if(tid==0) {
atomicAdd(c,se[0]);
}
}
|
80e55552d9c794a18da875cd7b0137d1f2eb3b46.cu
|
#include "includes.h"
__global__ void calculateResidual_CUDA(float *a, float *b, float *c) {
__shared__ float se[1024];
int tid=threadIdx.x;
int bid=blockIdx.x;
int n=blockDim.x;
// Calculate
se[tid]=fabsf(a[tid+bid*n]-b[tid+bid*n]);
__syncthreads();
// Reduction
int numActiveThreads=n/2;
while(numActiveThreads>0) {
if(tid<numActiveThreads) {
se[tid]=se[tid]+se[tid+numActiveThreads];
}
numActiveThreads=numActiveThreads/2;
__syncthreads();
}
if(tid==0) {
atomicAdd(c,se[0]);
}
}
|
29177d110bef3b09211fe55af1405864303aca97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h> // ceil
#define BLOCK_SIZE 128
#define BLOCK_SIZE_F 128.0
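// Each block reduces 2*blockDim.x consecutive elements of A in shared memory and writes
// its partial sum to A[blockIdx.x]; the host relaunches the kernel until one value remains.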
__global__
void sumRedKernel(float *A, int n){
__shared__ float partialSum[BLOCK_SIZE*2];
int i = (threadIdx.x + blockDim.x * blockIdx.x)*2;
unsigned int t = threadIdx.x * 2;
// Guard the loads: the last block may extend past the end of A
partialSum[t] = (i < n) ? A[i] : 0.0f;
partialSum[t+1] = (i+1 < n) ? A[i+1] : 0.0f;
for(unsigned int stride = 1; stride < blockDim.x*2; stride *= 2){
__syncthreads();
if(t % (2*stride) == 0 && (t+stride) < n) partialSum[t] += partialSum[t+stride];
}
__syncthreads();
if(threadIdx.x == 0){
A[blockIdx.x] = partialSum[0];
}
}
void sumRed(float* A, int n){
int size = n*sizeof(float);
float *d_A;
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumRedKernel), dim3(ceil(n/(BLOCK_SIZE_F*2))), dim3(BLOCK_SIZE), 0, 0, d_A,n);
hipMemcpy(A,d_A,size,hipMemcpyDeviceToHost);
hipFree(d_A);
}
int main(){
int n,i;
float *h_A;
scanf("%d", &n);
h_A = (float*) malloc(n*sizeof(float));
for(i = 0; i < n; i++){
//scanf("%f", &h_A[i]);
h_A[i] = 1;
}
while(n > 1){
sumRed(h_A,n);
n = ceil(n/(BLOCK_SIZE_F*2));
}
printf("%f", h_A[0]);
printf("\n");
return 0;
}
|
29177d110bef3b09211fe55af1405864303aca97.cu
|
#include <stdio.h>
#include <math.h> // ceil
#define BLOCK_SIZE 128
#define BLOCK_SIZE_F 128.0
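// Each block reduces 2*blockDim.x consecutive elements of A in shared memory and writes
// its partial sum to A[blockIdx.x]; the host relaunches the kernel until one value remains.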
__global__
void sumRedKernel(float *A, int n){
__shared__ float partialSum[BLOCK_SIZE*2];
int i = (threadIdx.x + blockDim.x * blockIdx.x)*2;
unsigned int t = threadIdx.x * 2;
// Guard the loads: the last block may extend past the end of A
partialSum[t] = (i < n) ? A[i] : 0.0f;
partialSum[t+1] = (i+1 < n) ? A[i+1] : 0.0f;
for(unsigned int stride = 1; stride < blockDim.x*2; stride *= 2){
__syncthreads();
if(t % (2*stride) == 0 && (t+stride) < n) partialSum[t] += partialSum[t+stride];
}
__syncthreads();
if(threadIdx.x == 0){
A[blockIdx.x] = partialSum[0];
}
}
void sumRed(float* A, int n){
int size = n*sizeof(float);
float *d_A;
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
sumRedKernel<<<ceil(n/(BLOCK_SIZE_F*2)), BLOCK_SIZE>>>(d_A,n);
cudaMemcpy(A,d_A,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);
}
int main(){
int n,i;
float *h_A;
scanf("%d", &n);
h_A = (float*) malloc(n*sizeof(float));
for(i = 0; i < n; i++){
//scanf("%f", &h_A[i]);
h_A[i] = 1;
}
while(n > 1){
sumRed(h_A,n);
n = ceil(n/(BLOCK_SIZE_F*2));
}
printf("%f", h_A[0]);
printf("\n");
return 0;
}
|
8547270ac692fce96b7ec1c4a18de18970cc0c74.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/transpose/transpose_gpu_impl.cuh" // NOLINT
#include "dali/kernels/transpose/transpose_gpu_setup.cuh" // NOLINT
#include <gtest/gtest.h>
#include <algorithm>
#include <numeric>
#include <vector>
#include "dali/core/dev_buffer.h"
#include "dali/kernels/common/utils.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/test/test_tensors.h"
#include "dali/core/cuda_event.h"
#include "dali/kernels/transpose/transpose_test.h"
namespace dali {
namespace kernels {
using namespace transpose_impl; // NOLINT
TEST(SimplifyPermute, NoSimplification) {
int64_t shape[] = { 2, 3, 4, 5 };
int perm[] = { 0, 3, 2, 1 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 4);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, CollapseUnitDims) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 0, 5, 1, 3, 2, 4 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, Collapse) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 3, 4, 5, 0, 1, 2 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 6, 20 };
ref_perm = { 1, 0 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(TransposeGPU, GetTransposeMethod) {
{
TensorShape<> shape = { 640*480, 3 };
int perm[] = { 1, 0 };
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Deinterleave);
}
{
TensorShape<> shape = { 3, 640*480 };
int perm[] = { 1, 0 }; // interleave
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Interleave);
}
{
TensorShape<> shape = { 640, 480 };
int perm[] = { 1, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 20, 640, 480 };
int perm[] = { 1, 2, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 480, 3 };
int perm[] = { 1, 0, 2 }; // vectorized tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 3, 480 };
int perm[] = { 1, 2, 0 }; // some mess
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640, 480, 50 };
int perm[] = { 1, 0, 2 }; // generic stuff
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640*480 };
int perm[] = { 0 }; // identity
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 1, sizeof(int)),
TransposeMethod::Copy);
}
}
TEST(TransposeTiled, AllPerm4DInnermost) {
TensorShape<> shape = { 19, 57, 37, 53 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
int grid_size = std::max(1, size / 512);
ASSERT_LT(grid_size * 512, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
CUDA_CALL(hipEventRecord(start));
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
CUDA_CALL(hipEventRecord(end));
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
CUDA_CALL(hipEventElapsedTime(&time, start, end));
time *= 1e+6;
std::cerr << 2*size*sizeof(*in_gpu.data()) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeTiled, BuildDescVectorized) {
TensorShape<> shape = { 57, 37, 53, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
TEST(TransposeTiled, BuildDescAndForceMisalignment) {
TensorShape<> shape = { 57, 37, 52, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<uint8> in_cpu(size + 4), out_cpu(size + 4);
vector<uint8> ref(size + 4);
DeviceBuffer<uint8> in_gpu, out_gpu;
in_gpu.resize(size + 4);
out_gpu.resize(size + 4);
for (uintptr_t offset = 0; offset < 4; offset++) {
std::iota(in_cpu.begin(), in_cpu.end(), 0);
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data() + offset, in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<uint8> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu.data() + offset,
in_gpu.data() + offset, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data() + offset, size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeTiled, BuildDescVectorized16BitOpt) {
TensorShape<> shape = { 57, 37, 53, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<uint16_t> in_cpu(size), out_cpu(size);
vector<uint16_t> ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<uint16_t> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<uint16_t> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
TEST(TransposeTiled, HighDimensionTest) {
TensorShape<> shape = {3, 3, 5, 7, 23, 3, 37, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<uint8> in_cpu(size), out_cpu(size);
vector<uint8> ref(size);
DeviceBuffer<uint8> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
for (int size_of_last_dim = 1; size_of_last_dim <= 4; size_of_last_dim++) {
shape = { 3, 3, 5, 7, 23, 3, 37, size_of_last_dim };
size = volume(shape);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 8> perm = { 1, 0, 4, 2, 6, 3, 5, 7 };
int grid_size = 1024;
TiledTransposeDesc<uint8> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu.data(), in_gpu.data(), grid_size);
hipLaunchKernelGGL(( TransposeTiledSingle), dim3(grid_size), dim3(dim3(32, 16)), kTiledTransposeMaxSharedMem, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeDeinterleave, AllPerm4DInnermost) {
int channels = 3;
TensorShape<> shape = { 19, 157, 137, channels }; // small inner dimension
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int block_size = 256;
int grid_size = std::max(1, size / (block_size * channels));
ASSERT_LT(grid_size * block_size * channels, size)
<< "Weak test error: Grid too large to test grid loop";
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
DeinterleaveDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitDeinterleave(desc, shape, make_span(perm), out_gpu, in_gpu);
CUDA_CALL(hipEventRecord(start));
hipLaunchKernelGGL(( TransposeDeinterleaveSingle), dim3(grid_size), dim3(block_size), 0, 0, desc);
CUDA_CALL(hipEventRecord(end));
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
CUDA_CALL(hipEventElapsedTime(&time, start, end));
time *= 1e+6;
std::cerr << 2*size*sizeof(*in_gpu.data()) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeGeneric, AllPerm4D) {
TensorShape<> shape = { 31, 43, 53, 47 };
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int grid_size = 2048;
int block_size = 256;
ASSERT_LT(grid_size * block_size, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << " input shape "
<< shape << "\n";
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
GenericTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitGenericTranspose(desc, shape, make_span(perm), out_gpu, in_gpu);
hipLaunchKernelGGL(( TransposeGenericSingle), dim3(grid_size), dim3(block_size), 0, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
TensorShape<> simplified_shape;
SmallVector<int, 6> simplified_perm;
SimplifyPermute(simplified_shape, simplified_perm, shape.data(), perm, 4);
if (simplified_shape == shape) {
for (int i = 0; i < 4; i++) {
ASSERT_EQ(simplified_perm[i], perm[i]);
}
// no simplification, don't repeat the test
continue;
}
std::cerr << "Testing permutation ";
for (auto i : simplified_perm)
std::cerr << i << " ";
std::cerr << " input shape " << simplified_shape << "\n";
memset(&desc, 0xCC, sizeof(desc));
CUDA_CALL(hipMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
InitGenericTranspose(desc, simplified_shape, make_span(simplified_perm), out_gpu, in_gpu);
hipLaunchKernelGGL(( TransposeGenericSingle), dim3(grid_size), dim3(block_size), 0, 0, desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
} // namespace kernels
} // namespace dali
|
8547270ac692fce96b7ec1c4a18de18970cc0c74.cu
|
// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/kernels/transpose/transpose_gpu_impl.cuh" // NOLINT
#include "dali/kernels/transpose/transpose_gpu_setup.cuh" // NOLINT
#include <gtest/gtest.h>
#include <algorithm>
#include <numeric>
#include <vector>
#include "dali/core/dev_buffer.h"
#include "dali/kernels/common/utils.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/test/test_tensors.h"
#include "dali/core/cuda_event.h"
#include "dali/kernels/transpose/transpose_test.h"
namespace dali {
namespace kernels {
using namespace transpose_impl; // NOLINT
TEST(SimplifyPermute, NoSimplification) {
int64_t shape[] = { 2, 3, 4, 5 };
int perm[] = { 0, 3, 2, 1 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 4);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, CollapseUnitDims) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 0, 5, 1, 3, 2, 4 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 2, 3, 4, 5 };
ref_perm = { 0, 3, 2, 1 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(SimplifyPermute, Collapse) {
int64_t shape[] = { 2, 1, 3, 4, 1, 5 };
int perm[] = { 3, 4, 5, 0, 1, 2 };
TensorShape<> s_shape, ref_shape;
SmallVector<int, 6> s_perm, ref_perm;
SimplifyPermute(s_shape, s_perm, shape, perm, 6);
ref_shape = { 6, 20 };
ref_perm = { 1, 0 };
EXPECT_EQ(s_shape, ref_shape);
EXPECT_EQ(s_perm, ref_perm);
}
TEST(TransposeGPU, GetTransposeMethod) {
{
TensorShape<> shape = { 640*480, 3 };
int perm[] = { 1, 0 };
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Deinterleave);
}
{
TensorShape<> shape = { 3, 640*480 };
int perm[] = { 1, 0 }; // interleave
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Interleave);
}
{
TensorShape<> shape = { 640, 480 };
int perm[] = { 1, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 2, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 20, 640, 480 };
int perm[] = { 1, 2, 0 }; // scalar tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 480, 3 };
int perm[] = { 1, 0, 2 }; // vectorized tiled
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Tiled);
}
{
TensorShape<> shape = { 640, 3, 480 };
int perm[] = { 1, 2, 0 }; // some mess
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640, 480, 50 };
int perm[] = { 1, 0, 2 }; // generic stuff
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 3, sizeof(int)),
TransposeMethod::Generic);
}
{
TensorShape<> shape = { 640*480 };
int perm[] = { 0 }; // identity
EXPECT_EQ(GetTransposeMethod(shape.data(), perm, 1, sizeof(int)),
TransposeMethod::Copy);
}
}
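// Illustrative sketch only (an addition, not part of the tested API): a naive CPU permute
// in the spirit of testing::RefTranspose, whose real implementation lives in
// transpose_test.h. The helper name and its exact argument handling are assumptions.
template <typename T>
static void NaiveRefTranspose4D(T *out, const T *in, const int64_t *shape, const int *perm) {
  int64_t out_shape[4], in_stride[4], out_stride[4];
  for (int d = 0; d < 4; d++)
    out_shape[d] = shape[perm[d]];
  in_stride[3] = out_stride[3] = 1;
  for (int d = 2; d >= 0; d--) {
    in_stride[d]  = in_stride[d + 1]  * shape[d + 1];
    out_stride[d] = out_stride[d + 1] * out_shape[d + 1];
  }
  for (int64_t i0 = 0; i0 < out_shape[0]; i0++)
    for (int64_t i1 = 0; i1 < out_shape[1]; i1++)
      for (int64_t i2 = 0; i2 < out_shape[2]; i2++)
        for (int64_t i3 = 0; i3 < out_shape[3]; i3++) {
          // output coordinate i along dim d maps to input coordinate i along dim perm[d]
          int64_t out_ofs = i0*out_stride[0] + i1*out_stride[1] + i2*out_stride[2] + i3*out_stride[3];
          int64_t in_ofs  = i0*in_stride[perm[0]] + i1*in_stride[perm[1]]
                          + i2*in_stride[perm[2]] + i3*in_stride[perm[3]];
          out[out_ofs] = in[in_ofs];
        }
}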
TEST(TransposeTiled, AllPerm4DInnermost) {
TensorShape<> shape = { 19, 57, 37, 53 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
int grid_size = std::max(1, size / 512);
ASSERT_LT(grid_size * 512, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
CUDA_CALL(cudaEventRecord(start));
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
CUDA_CALL(cudaEventRecord(end));
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
CUDA_CALL(cudaEventElapsedTime(&time, start, end));
time *= 1e+6;
std::cerr << 2*size*sizeof(*in_gpu.data()) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeTiled, BuildDescVectorized) {
TensorShape<> shape = { 57, 37, 53, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
TEST(TransposeTiled, BuildDescAndForceMisalignment) {
TensorShape<> shape = { 57, 37, 52, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<uint8> in_cpu(size + 4), out_cpu(size + 4);
vector<uint8> ref(size + 4);
DeviceBuffer<uint8> in_gpu, out_gpu;
in_gpu.resize(size + 4);
out_gpu.resize(size + 4);
for (uintptr_t offset = 0; offset < 4; offset++) {
std::iota(in_cpu.begin(), in_cpu.end(), 0);
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data() + offset, in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<uint8> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu.data() + offset,
in_gpu.data() + offset, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data() + offset, size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeTiled, BuildDescVectorized16BitOpt) {
TensorShape<> shape = { 57, 37, 53, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<uint16_t> in_cpu(size), out_cpu(size);
vector<uint16_t> ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<uint16_t> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 6> perm = { 1, 2, 0, 3 };
int grid_size = 1024;
TiledTransposeDesc<uint16_t> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu, in_gpu, grid_size);
EXPECT_EQ(desc.lanes, 4) << "Lanes not detected";
EXPECT_EQ(desc.ndim, 3) << "Number of dimensions should have shrunk in favor of lanes";
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
TEST(TransposeTiled, HighDimensionTest) {
TensorShape<> shape = {3, 3, 5, 7, 23, 3, 37, 4 }; // a bunch of primes, just to make it harder
int size = volume(shape);
vector<uint8> in_cpu(size), out_cpu(size);
vector<uint8> ref(size);
DeviceBuffer<uint8> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
for (int size_of_last_dim = 1; size_of_last_dim <= 4; size_of_last_dim++) {
shape = { 3, 3, 5, 7, 23, 3, 37, size_of_last_dim };
size = volume(shape);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
copyH2D(in_gpu.data(), in_cpu.data(), size);
SmallVector<int, 8> perm = { 1, 0, 4, 2, 6, 3, 5, 7 };
int grid_size = 1024;
TiledTransposeDesc<uint8> desc;
memset(&desc, 0xCC, sizeof(desc));
InitTiledTranspose(desc, shape, make_span(perm), out_gpu.data(), in_gpu.data(), grid_size);
TransposeTiledSingle<<<grid_size, dim3(32, 16), kTiledTransposeMaxSharedMem>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm.data(), perm.size());
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeDeinterleave, AllPerm4DInnermost) {
int channels = 3;
TensorShape<> shape = { 19, 157, 137, channels }; // small inner dimension
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int block_size = 256;
int grid_size = std::max(1, size / (block_size * channels));
ASSERT_LT(grid_size * block_size * channels, size)
<< "Weak test error: Grid too large to test grid loop";
auto start = CUDAEvent::CreateWithFlags(0);
auto end = CUDAEvent::CreateWithFlags(0);
for (auto &perm : testing::Permutations4) {
if (perm[3] == 3)
continue; // innermost dim must be permuted
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << "\n";
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
DeinterleaveDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitDeinterleave(desc, shape, make_span(perm), out_gpu, in_gpu);
CUDA_CALL(cudaEventRecord(start));
TransposeDeinterleaveSingle<<<grid_size, block_size>>>(desc);
CUDA_CALL(cudaEventRecord(end));
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
float time;
CUDA_CALL(cudaEventElapsedTime(&time, start, end));
time *= 1e+6;
std::cerr << 2*size*sizeof(*in_gpu.data()) / time << " GB/s" << "\n";
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
TEST(TransposeGeneric, AllPerm4D) {
TensorShape<> shape = { 31, 43, 53, 47 };
int size = volume(shape);
vector<int> in_cpu(size), out_cpu(size), ref(size);
std::iota(in_cpu.begin(), in_cpu.end(), 0);
DeviceBuffer<int> in_gpu, out_gpu;
in_gpu.resize(size);
out_gpu.resize(size);
copyH2D(in_gpu.data(), in_cpu.data(), size);
int grid_size = 2048;
int block_size = 256;
ASSERT_LT(grid_size * block_size, size) << "Weak test error: Grid too large to test grid loop";
for (auto &perm : testing::Permutations4) {
std::cerr << "Testing permutation "
<< perm[0] << " " << perm[1] << " " << perm[2] << " " << perm[3] << " input shape "
<< shape << "\n";
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
GenericTransposeDesc<int> desc;
memset(&desc, 0xCC, sizeof(desc));
InitGenericTranspose(desc, shape, make_span(perm), out_gpu, in_gpu);
TransposeGenericSingle<<<grid_size, block_size>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
testing::RefTranspose(ref.data(), in_cpu.data(), shape.data(), perm, 4);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
TensorShape<> simplified_shape;
SmallVector<int, 6> simplified_perm;
SimplifyPermute(simplified_shape, simplified_perm, shape.data(), perm, 4);
if (simplified_shape == shape) {
for (int i = 0; i < 4; i++) {
ASSERT_EQ(simplified_perm[i], perm[i]);
}
// no simplification, don't repeat the test
continue;
}
std::cerr << "Testing permutation ";
for (auto i : simplified_perm)
std::cerr << i << " ";
std::cerr << " input shape " << simplified_shape << "\n";
memset(&desc, 0xCC, sizeof(desc));
CUDA_CALL(cudaMemset(out_gpu, 0xff, size*sizeof(*in_gpu.data())));
InitGenericTranspose(desc, simplified_shape, make_span(simplified_perm), out_gpu, in_gpu);
TransposeGenericSingle<<<grid_size, block_size>>>(desc);
copyD2H(out_cpu.data(), out_gpu.data(), size);
for (int i = 0; i < size; i++) {
ASSERT_EQ(out_cpu[i], ref[i]) << " at " << i;
}
}
}
} // namespace kernels
} // namespace dali
|
316ae6e430d2d4d514a2762d459ac81a33da1a9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define CUDACHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
class TestClass {
public:
int * data;
size_t len;
TestClass(size_t len) {
printf("Constructor\n");
this->data = nullptr;
this->len = len;
}
~TestClass(){
printf("~Destructor\n");
}
__host__ void allocate(){
CUDACHECK(hipMalloc((void**) &this->data, this->len * sizeof(int)));
CUDACHECK(hipMemset(this->data, 0, this->len * sizeof(int)));
}
__host__ void free(){
CUDACHECK(hipFree(this->data));
this->data = nullptr;
}
__device__ int get(size_t index){
return this->data[index];
}
__device__ void set(size_t index, int value){
this->data[index] = value;
}
};
__global__ void test_kernel(unsigned int threads, TestClass * d_instance){
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < threads){
// printf("Thread %u\n", tid);
printf("Thread %u: d_isntance %p, element %d\n", tid, d_instance, d_instance->get(tid));
}
}
void test_class_launch(){
const size_t N = 16;
// Construct on the host
TestClass * h_instance = new TestClass(N);
// Construct.
printf("construct...\n");
h_instance->allocate();
printf("h_instance %p \n", h_instance);
// Launch a kernel with the instance as the parameter
printf("kernel...\n");
hipLaunchKernelGGL(( test_kernel), dim3(1), dim3(N), 0, 0, N, h_instance);
CUDACHECK(hipDeviceSynchronize());
printf("synced...\n");
// Free
printf("free...\n");
h_instance->free();
delete h_instance;
}
int main(int argc, char * argv[]){
printf("main\n");
test_class_launch();
return 1;
}
|
316ae6e430d2d4d514a2762d459ac81a33da1a9c.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
#define CUDACHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
class TestClass {
public:
int * data;
size_t len;
TestClass(size_t len) {
printf("Constructor\n");
this->data = nullptr;
this->len = len;
}
~TestClass(){
printf("~Destructor\n");
}
__host__ void allocate(){
CUDACHECK(cudaMalloc((void**) &this->data, this->len * sizeof(int)));
CUDACHECK(cudaMemset(this->data, 0, this->len * sizeof(int)));
}
__host__ void free(){
CUDACHECK(cudaFree(this->data));
this->data = nullptr;
}
__device__ int get(size_t index){
return this->data[index];
}
__device__ void set(size_t index, int value){
this->data[index] = value;
}
};
__global__ void test_kernel(unsigned int threads, TestClass * d_instance){
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < threads){
// printf("Thread %u\n", tid);
printf("Thread %u: d_isntance %p, element %d\n", tid, d_instance, d_instance->get(tid));
}
}
void test_class_launch(){
const size_t N = 16;
// Construct on the host
TestClass * h_instance = new TestClass(N);
// Construct.
printf("construct...\n");
h_instance->allocate();
printf("h_instance %p \n", h_instance);
// Launch a kernel with the instance as the parameter
printf("kernel...\n");
test_kernel<<<1, N>>>(N, h_instance);
CUDACHECK(cudaDeviceSynchronize());
printf("synced...\n");
// Free
printf("free...\n");
h_instance->free();
delete h_instance;
}
int main(int argc, char * argv[]){
printf("main\n");
test_class_launch();
return 1;
}
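// A sketch (an assumption, not part of the original experiment): if the kernel should
// dereference a pointer that is itself valid on the device, the host object can be
// byte-copied into device memory and that device-side pointer passed to the kernel.
void test_class_launch_device_copy(){
    const size_t N = 16;
    TestClass * h_instance = new TestClass(N);
    h_instance->allocate();
    TestClass * d_instance = nullptr;
    CUDACHECK(cudaMalloc((void**) &d_instance, sizeof(TestClass)));
    // shallow copy: the embedded data pointer already refers to device memory
    CUDACHECK(cudaMemcpy(d_instance, h_instance, sizeof(TestClass), cudaMemcpyHostToDevice));
    test_kernel<<<1, N>>>(N, d_instance);
    CUDACHECK(cudaDeviceSynchronize());
    CUDACHECK(cudaFree(d_instance));
    h_instance->free();
    delete h_instance;
}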
|
2f98ee00d08f605f6916d207e9f8cf9cb4522a00.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "errchk.cuh"
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
template <bool isNP2>
__device__ void loadSharedChunkFromMem(unsigned *s_data,
const unsigned *idata,
int n, int baseIndex,
int& ai, int& bi,
int& mem_ai, int& mem_bi,
int& bankOffsetA, int& bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = idata[mem_ai];
if (isNP2) {
s_data[bi + bankOffsetB] = (bi < n) ? idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = idata[mem_bi];
}
}
template <bool isNP2>
__device__
void storeSharedChunkToMem(unsigned* odata,
const unsigned* s_data,
int n,
int ai, int bi,
int mem_ai, int mem_bi,
int bankOffsetA, int bankOffsetB) {
__syncthreads();
odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) {
if (bi < n)
odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__
void clearLastElement(unsigned* s_data,
unsigned *blockSums,
int blockIndex) {
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) {
blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0;
}
}
__device__
unsigned int buildSum(unsigned *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__
void scanRootToLeaves(unsigned *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum>
__device__
void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums) {
int stride = buildSum(data);
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride);
}
template <bool storeSum, bool isNP2>
__global__
void prescan(unsigned *odata,
const unsigned *idata,
unsigned *blockSums,
int n,
int blockIndex,
int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ unsigned s_data[];
loadSharedChunkFromMem<isNP2>(s_data, idata, n,
(baseIndex == 0) ?
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
prescanBlock<storeSum>(s_data, blockIndex, blockSums);
storeSharedChunkToMem<isNP2>(odata, s_data, n,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
}
__global__
void uniformAdd(unsigned *data,
unsigned *uniforms,
int n,
int blockOffset,
int baseIndex) {
__shared__ unsigned uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
data[address] += uni;
data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
|
2f98ee00d08f605f6916d207e9f8cf9cb4522a00.cu
|
#include "errchk.cuh"
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
template <bool isNP2>
__device__ void loadSharedChunkFromMem(unsigned *s_data,
const unsigned *idata,
int n, int baseIndex,
int& ai, int& bi,
int& mem_ai, int& mem_bi,
int& bankOffsetA, int& bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = idata[mem_ai];
if (isNP2) {
s_data[bi + bankOffsetB] = (bi < n) ? idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = idata[mem_bi];
}
}
template <bool isNP2>
__device__
void storeSharedChunkToMem(unsigned* odata,
const unsigned* s_data,
int n,
int ai, int bi,
int mem_ai, int mem_bi,
int bankOffsetA, int bankOffsetB) {
__syncthreads();
odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) {
if (bi < n)
odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__
void clearLastElement(unsigned* s_data,
unsigned *blockSums,
int blockIndex) {
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) {
blockSums[blockIndex] = s_data[index];
}
s_data[index] = 0;
}
}
__device__
unsigned int buildSum(unsigned *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__
void scanRootToLeaves(unsigned *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d)
{
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum>
__device__
void prescanBlock(unsigned *data, int blockIndex, unsigned *blockSums) {
int stride = buildSum(data);
clearLastElement<storeSum>(data, blockSums,
(blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride);
}
template <bool storeSum, bool isNP2>
__global__
void prescan(unsigned *odata,
const unsigned *idata,
unsigned *blockSums,
int n,
int blockIndex,
int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ unsigned s_data[];
loadSharedChunkFromMem<isNP2>(s_data, idata, n,
(baseIndex == 0) ?
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
prescanBlock<storeSum>(s_data, blockIndex, blockSums);
storeSharedChunkToMem<isNP2>(odata, s_data, n,
ai, bi, mem_ai, mem_bi,
bankOffsetA, bankOffsetB);
}
__global__
void uniformAdd(unsigned *data,
unsigned *uniforms,
int n,
int blockOffset,
int baseIndex) {
__shared__ unsigned uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
data[address] += uni;
data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
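// A minimal host-side sketch (an assumption; the original driver code is not in this file)
// of how prescan and uniformAdd are typically chained for arrays spanning many blocks:
// scan each block while recording its total, scan those totals, then add the scanned
// totals back. Buffer names are hypothetical, d_block_sums is assumed to be allocated
// with at least 2*block_size entries, and the non-power-of-two / partial-last-block
// handling (isNP2 on the first pass) is omitted for brevity.
static void prescanArraySketch(unsigned *d_out, const unsigned *d_in,
                               unsigned *d_block_sums, int n, int block_size) {
  int elems_per_block = 2 * block_size;
  int num_blocks = (n + elems_per_block - 1) / elems_per_block;
  size_t smem = sizeof(unsigned) *
                (elems_per_block + CONFLICT_FREE_OFFSET(elems_per_block - 1) + 1);
  // per-block exclusive scan, storing each block's total in d_block_sums
  prescan<true, false><<<num_blocks, block_size, smem>>>(d_out, d_in, d_block_sums, n, 0, 0);
  // scan the block totals themselves (assumes num_blocks fits in one block's chunk)
  prescan<false, true><<<1, block_size, smem>>>(d_block_sums, d_block_sums, nullptr,
                                                num_blocks, 0, 0);
  // add each block's scanned offset to all of its elements
  uniformAdd<<<num_blocks, block_size>>>(d_out, d_block_sums, n, 0, 0);
}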
|
baad19c67cc6654ab8a93e610b477553a0b2c54b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#define DATATYPE int
#define ARRAYLEN 1024*1024*256
#define REP 128
#define TIMETESTEVENT
#include <hip/hip_runtime.h>
#include "repeat.h"
__global__ void test_global_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;
out[0] =p;
time[0] = time_tmp;
}
texture <int,1,hipReadModeElementType> texref;
__global__ void test_texture_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=tex1Dfetch(texref,p);)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;
out[1] =p;
time[1] = time_tmp;
}
void call_test_latency(DATATYPE *h_array,DATATYPE *d_array,int step,int its,double *h_time,double *d_time,DATATYPE *d_out,DATATYPE *h_out)
{
printf("111 111\n");
if (hipSuccess != hipMemcpy(d_array,h_array,sizeof(DATATYPE)*ARRAYLEN,hipMemcpyHostToDevice)){ printf("1\n"); return; }
printf("111 222\n");
hipLaunchKernelGGL(( test_global_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its,d_array);
if (hipDeviceSynchronize() != hipSuccess){
printf("3\n");
return;
}
printf("111 333\n");
hipMemcpy(h_time,d_time,sizeof(double),hipMemcpyDeviceToHost);
printf("%d:\t%f\t\n",step,h_time[0]);
// printf("111 444\n");
}
|
baad19c67cc6654ab8a93e610b477553a0b2c54b.cu
|
#include <stdlib.h>
#include <stdio.h>
#define DATATYPE int
#define ARRAYLEN 1024*1024*256
#define REP 128
#define TIMETESTEVENT
#include <cuda_runtime.h>
#include "repeat.h"
__global__ void test_global_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;
out[0] =p;
time[0] = time_tmp;
}
texture <int,1,cudaReadModeElementType> texref;
__global__ void test_texture_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
// for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=tex1Dfetch(texref,p);)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0;
out[1] =p;
time[1] = time_tmp;
}
void call_test_latency(DATATYPE *h_array,DATATYPE *d_array,int step,int its,double *h_time,double *d_time,DATATYPE *d_out,DATATYPE *h_out)
{
printf("111 111\n");
if (cudaSuccess != cudaMemcpy(d_array,h_array,sizeof(DATATYPE)*ARRAYLEN,cudaMemcpyHostToDevice)){ printf("1\n"); return; }
printf("111 222\n");
test_global_latency <<<1,1>>>(d_time,d_out,its,d_array);
if (cudaDeviceSynchronize() != cudaSuccess){
printf("3\n");
return;
}
printf("111 333\n");
cudaMemcpy(h_time,d_time,sizeof(double),cudaMemcpyDeviceToHost);
printf("%d:\t%f\t\n",step,h_time[0]);
// printf("111 444\n");
}
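// A sketch (an assumption, not part of the original file) of how h_array is typically
// filled for the p = array[p] pointer chase above: element i holds the index of the
// element `stride` entries ahead (wrapping at the array length), so every dependent
// load in repeat128(p=array[p];) jumps by stride*sizeof(DATATYPE) bytes.
void init_pointer_chase(DATATYPE *h_array, int stride)
{
    for (int i = 0; i < ARRAYLEN; i++)
        h_array[i] = (i + stride) % (ARRAYLEN);
}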
|
a32d77d2094169aa9d3dd59572ce52c161d86caf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float def1, def2, def3, def4, def5, def6;
float use1 = 0, use2 = 0, use3 = 0, use4 = 0, use5 = 0, use6 = 0;
float I1=A[i];
float I2=B[i];
float Value;
#pragma unroll 100
// Excessive Addition access
for(unsigned k=0; k<iterations;k++) {
def1= __fmaf_rn(I1,I2,use1);
def2= __fmaf_rn(I1,I2,use2);
def3= __fmaf_rn(I1,I2,use3);
def4= __fmaf_rn(I1,I2,use4);
def5= __fmaf_rn(I1,I2,use5);
def6= __fmaf_rn(I1,I2,use6);
use1= __fmaf_rn(def3,def1,def2);
use2= __fmaf_rn(def1,def4,def2);
use3= __fmaf_rn(def1,def2,def3);
use4= __fmaf_rn(def6,def4,def5);
use5= __fmaf_rn(def4,def2,def5);
use6= __fmaf_rn(def4,def5,def6);
}
__syncthreads();
Value=(use1+use2)* (use3 + use4) * (use5 + use6);
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
a32d77d2094169aa9d3dd59572ce52c161d86caf.cu
|
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float def1, def2, def3, def4, def5, def6;
float use1 = 0, use2 = 0, use3 = 0, use4 = 0, use5 = 0, use6 = 0;
float I1=A[i];
float I2=B[i];
float Value;
#pragma unroll 100
// Excessive Addition access
for(unsigned k=0; k<iterations;k++) {
def1= __fmaf_rn(I1,I2,use1);
def2= __fmaf_rn(I1,I2,use2);
def3= __fmaf_rn(I1,I2,use3);
def4= __fmaf_rn(I1,I2,use4);
def5= __fmaf_rn(I1,I2,use5);
def6= __fmaf_rn(I1,I2,use6);
use1= __fmaf_rn(def3,def1,def2);
use2= __fmaf_rn(def1,def4,def2);
use3= __fmaf_rn(def1,def2,def3);
use4= __fmaf_rn(def6,def4,def5);
use5= __fmaf_rn(def4,def2,def5);
use6= __fmaf_rn(def4,def5,def6);
}
__syncthreads();
Value=(use1+use2)* (use3 + use4) * (use5 + use6);
C[i]=Value;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
5e1bec1a37accedb97a57f3dc1391640d46a1a5b.hip
|
// !!! This is a file automatically generated by hipify!!!
//=============================================================//
// //
//             ||Gpu Accelerated Finite Element ||             //
// //
// --------Version 2.0s---------- //
// //
// //
// //
// Authors: Andrew Konya (Kent State University) //
// Robin Selinger (Kent State University) //
// Badel MBanga (kent State University) //
// //
//     Finite element simulation executed on GPU using CUDA    //
// Hybrid MD finite element algorithm used to allow //
//     all computations to be implemented locally, requiring   //
//     parallelization of all processes in the calculation     //
// //
//=============================================================//
#include "mainhead.h"
int main()
{
//Get Device properties
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop,0));
printf( "Code executing on %s\n\n", prop.name );
//displayGPUinfo(prop);
int Ntets,Nnodes;
//get dimensions of the mesh
get_mesh_dim(Ntets,Nnodes);
//create objects of TetArray and NodeArray class with correct size
TetArray Tet = TetArray(Ntets);
NodeArray Node = NodeArray(Nnodes);
//read the mesh into Node and Tet objects
get_mesh(Node,Tet,Ntets,Nnodes);
//get positions of tetrahedra
get_tet_pos(Node,Tet,Ntets);
//set director n for each tetrahedra
// set_n(Tet,Ntets);
// comment out GPU calculations while Debugging director sim
//reorder tetrahedra
gorder_tet(Node,Tet,Ntets);
//re-order nodes and reassign tetrahedra component lists
finish_order(Node,Tet,Ntets,Nnodes);
//find initial A's, invert them, and store all in the Tet object
// init_As(Node,Tet,Ntets);
//print spacefilling curve to represent adjacency between tetrahedra
printorder(Tet,Ntets);
//build surface connection graph
connectionGraph graph = connectionGraph(Nnodes);
buildConnectionGraph(Node,Tet,graph,Nnodes,Ntets);
graph.reduce();
graph.setInitial();
graph.calcGauss();
graph.print("Output//surfaceGraph.vtk");
//now ready to prepare for dynamics
//declare data structures for data on device
//and host
DevDataBlock dev_dat;
HostDataBlock host_dat;
//Pack data to send to device
packdata(Node,Tet,&host_dat,Ntets,Nnodes);
//send data to device
data_to_device(&dev_dat,&host_dat,Ntets,Nnodes);
//Print Simulation Parameters and Such
printf("\n\n Prepared for dynamics with:\n \
steps/frame = %d\n \
Volume = %f cm^3\n \
Mass = %f kg\n\n",iterPerFrame,host_dat.host_totalVolume,host_dat.host_totalVolume*materialDensity);
//=================================================================
//initialize GPU synchronization arrays
//will store synchronization information
//=================================================================
int Threads_Per_Block = TPB;
int Blocks = (Ntets+Threads_Per_Block)/Threads_Per_Block;
int *Syncin,*Syncout,*g_mutex;
//allocate memory on device for Syncin and Syncout
HANDLE_ERROR( hipMalloc( (void**)&Syncin
,Blocks*sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&Syncout
,Blocks*sizeof(int) ) );
int* SyncZeros;
SyncZeros = (int*)malloc(Blocks*sizeof(int));
for (int i=0;i<Blocks;i++){
SyncZeros[i]=0;
}
HANDLE_ERROR( hipMemcpy(Syncin
,SyncZeros
,Blocks*sizeof(int)
,hipMemcpyHostToDevice ) );
//allocate global mutex and set =0
HANDLE_ERROR( hipMalloc( (void**)&g_mutex,
sizeof(int) ) );
HANDLE_ERROR( hipMemset( g_mutex, 0, sizeof(int) ) );
//=================================================================
//run dynamics
//=================================================================
run_dynamics(&dev_dat,&host_dat,&graph,Ntets,Nnodes,Syncin,Syncout,g_mutex);
//check for CUDA errors
any_errors();
//exit program
HANDLE_ERROR( hipFree( Syncin ) );
HANDLE_ERROR(hipFree( Syncout ) );
HANDLE_ERROR(hipFree( g_mutex ) );
exit_program(&dev_dat);
//*/
return 0;
}
|
5e1bec1a37accedb97a57f3dc1391640d46a1a5b.cu
|
//=============================================================//
// //
//             ||Gpu Accelerated Finite Element ||             //
// //
// --------Version 2.0s---------- //
// //
// //
// //
// Authors: Andrew Konya (Kent State University) //
// Robin Selinger (Kent State University) //
// Badel MBanga (kent State University) //
// //
//     Finite element simulation executed on GPU using CUDA    //
// Hybrid MD finite element algorithm used to allow //
//     all computations to be implemented locally, requiring   //
//     parallelization of all processes in the calculation     //
// //
//=============================================================//
#include "mainhead.h"
int main()
{
//Get Device properties
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop,0));
printf( "Code executing on %s\n\n", prop.name );
//displayGPUinfo(prop);
int Ntets,Nnodes;
//get dimensions of the mesh
get_mesh_dim(Ntets,Nnodes);
//create objects of TetArray and NodeArray class with correct size
TetArray Tet = TetArray(Ntets);
NodeArray Node = NodeArray(Nnodes);
//read the mesh into Node and Tet objects
get_mesh(Node,Tet,Ntets,Nnodes);
//get positions of tetrahedra
get_tet_pos(Node,Tet,Ntets);
//set director n for each tetrahedra
// set_n(Tet,Ntets);
// comment out GPU calculations while Debugging director sim
//reorder tetrahedra
gorder_tet(Node,Tet,Ntets);
//re-order nodes and reassign tetrahedra component lists
finish_order(Node,Tet,Ntets,Nnodes);
//find initial A's, invert them, and store all in the Tet object
// init_As(Node,Tet,Ntets);
//print spacefilling curve to represent adjacency between tetrahedra
printorder(Tet,Ntets);
//build surface connection graph
connectionGraph graph = connectionGraph(Nnodes);
buildConnectionGraph(Node,Tet,graph,Nnodes,Ntets);
graph.reduce();
graph.setInitial();
graph.calcGauss();
graph.print("Output//surfaceGraph.vtk");
//now ready to prepare for dynamics
//declare data structures for data on device
//and host
DevDataBlock dev_dat;
HostDataBlock host_dat;
//Pack data to send to device
packdata(Node,Tet,&host_dat,Ntets,Nnodes);
//send data to device
data_to_device(&dev_dat,&host_dat,Ntets,Nnodes);
//Print Simulation Parameters and Such
printf("\n\n Prepared for dynamics with:\n \
steps/frame = %d\n \
Volume = %f cm^3\n \
Mass = %f kg\n\n",iterPerFrame,host_dat.host_totalVolume,host_dat.host_totalVolume*materialDensity);
//=================================================================
//initialize GPU synchronization arrays
//will store synchronization information
//=================================================================
int Threads_Per_Block = TPB;
int Blocks = (Ntets+Threads_Per_Block)/Threads_Per_Block;
int *Syncin,*Syncout,*g_mutex;
//allocate memory on device for Syncin and Syncout
HANDLE_ERROR( cudaMalloc( (void**)&Syncin
,Blocks*sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&Syncout
,Blocks*sizeof(int) ) );
int* SyncZeros;
SyncZeros = (int*)malloc(Blocks*sizeof(int));
for (int i=0;i<Blocks;i++){
SyncZeros[i]=0;
}
HANDLE_ERROR( cudaMemcpy(Syncin
,SyncZeros
,Blocks*sizeof(int)
,cudaMemcpyHostToDevice ) );
//allocate global mutex and set =0
HANDLE_ERROR( cudaMalloc( (void**)&g_mutex,
sizeof(int) ) );
HANDLE_ERROR( cudaMemset( g_mutex, 0, sizeof(int) ) );
//=================================================================
//run dynamics
//=================================================================
run_dynamics(&dev_dat,&host_dat,&graph,Ntets,Nnodes,Syncin,Syncout,g_mutex);
//check for CUDA errors
any_errors();
//exit program
HANDLE_ERROR( cudaFree( Syncin ) );
HANDLE_ERROR(cudaFree( Syncout ) );
HANDLE_ERROR(cudaFree( g_mutex ) );
exit_program(&dev_dat);
//*/
return 0;
}
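// A sketch (an assumption: the real barrier lives in the device code called from
// run_dynamics, which is not in this file) of the classic inter-block synchronization
// that arrays like Syncin/Syncout support: every block reports arrival, block 0 waits
// for all of them and then releases them. It assumes all blocks are co-resident on the
// GPU and that gridDim.x <= blockDim.x; the arrays must be accessed as volatile.
__device__ void interBlockSyncSketch(int goalVal, volatile int *Syncin, volatile int *Syncout)
{
	if (threadIdx.x == 0) Syncin[blockIdx.x] = goalVal; // report this block's arrival
	if (blockIdx.x == 0) {
		if (threadIdx.x < gridDim.x)
			while (Syncin[threadIdx.x] != goalVal) { } // wait for every block
		__syncthreads();
		if (threadIdx.x < gridDim.x)
			Syncout[threadIdx.x] = goalVal; // release the waiting blocks
	}
	if (threadIdx.x == 0)
		while (Syncout[blockIdx.x] != goalVal) { } // spin until released
	__syncthreads();
}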
|
570cbaae2b3b1043cbbc1f8ff372d4516b22044c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include "configuration.h"
/**
This file contains the needed kernel functions for the 4LSTM-MaxPool-4LSTM Multi GPU block (3 GPUs)
Note that each CUDA kernel function has a wrapper function that just calls it
_kernel_x is called in _kernel_x_wrapper
**/
// This kernel transposes the first weights matrix (for GPU1)
static __global__ void _kernel_0(float *buf_weights_gpu1, float *buf_weights_T_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights_T_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (c11 * ((1 * (4 * FEATURE_SIZE)) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights_gpu1[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_0_wrapper(float *buf_weights_gpu1, float *buf_weights_T_gpu1)
{
{
dim3 blocks((4 * FEATURE_SIZE) / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_0), dim3(blocks), dim3(threads), 0, 0, buf_weights_gpu1, buf_weights_T_gpu1);
};
return 0;
};
// This kernel initializes the working buffer (h) to 0 (for GPU1)
static __global__ void _kernel_1(float *buf_h_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_1_wrapper(float *buf_h_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE/32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_1), dim3(blocks), dim3(threads), 0, 0, buf_h_gpu1);
};
return 0;
};
// This kernel initializes the working buffer (c) for short term memories to 0 (for GPU1)
static __global__ void _kernel_2(float *buf_c_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 <= 3); (c9 += 1))
{
buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_2_wrapper(float *buf_c_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_2), dim3(blocks), dim3(threads), 0, 0, buf_c_gpu1);
};
return 0;
};
// This kernel copies the input sequence into the working buffer (h) (for GPU1)
static __global__ void _kernel_3(float *buf_h_gpu1, float *buf_x_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < SEQ_LENGTH/2); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_h_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((2 * c9) + c11) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (0 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = buf_x_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((2 * c9) + c11) * ((1 * FEATURE_SIZE) * BATCH_SIZE)))];
};
};
};
extern "C" int32_t* _kernel_3_wrapper(float *buf_h_gpu1, float *buf_x_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_3), dim3(blocks), dim3(threads), 0, 0, buf_h_gpu1, buf_x_gpu1);
};
return 0;
};
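// A minimal host-side sketch (an assumption, not part of the generated file) of how the
// GPU1 wrappers above might be sequenced before the time loop: transpose the weights,
// zero the h and c state buffers, then copy the input sequence into h. Device selection
// and buffer allocation are assumed to happen elsewhere.
extern "C" void _gpu1_setup_sketch(float *buf_weights_gpu1, float *buf_weights_T_gpu1,
                                   float *buf_h_gpu1, float *buf_c_gpu1, float *buf_x_gpu1)
{
    _kernel_0_wrapper(buf_weights_gpu1, buf_weights_T_gpu1); // weights -> weights_T
    _kernel_1_wrapper(buf_h_gpu1);                           // h state = 0
    _kernel_2_wrapper(buf_c_gpu1);                           // c state = 0
    _kernel_3_wrapper(buf_h_gpu1, buf_x_gpu1);               // copy input sequence into h
}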
// This kernel transposes the first weights matrix (for GPU2)
static __global__ void _kernel_4(float *buf_weights_gpu2, float *buf_weights_T_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights_T_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * 4 * FEATURE_SIZE))) + (c11 * ((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights_gpu2[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_4_wrapper(float *buf_weights_gpu2, float *buf_weights_T_gpu2)
{
{
dim3 blocks(4 * FEATURE_SIZE / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_4), dim3(blocks), dim3(threads), 0, 0, buf_weights_gpu2, buf_weights_T_gpu2);
};
return 0;
};
// This kernel transposes the second weights matrix (for GPU2)
static __global__ void _kernel_5(float *buf_weights2_T_gpu2, float *buf_weights2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights2_T_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * 4 * FEATURE_SIZE))) + (c11 * ((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights2_gpu2[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
//
extern "C" int32_t* _kernel_5_wrapper(float *buf_weights2_T_gpu2, float *buf_weights2_gpu2)
{
{
dim3 blocks(4 * FEATURE_SIZE / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_5), dim3(blocks), dim3(threads), 0, 0, buf_weights2_T_gpu2, buf_weights2_gpu2);
};
return 0;
};
// This kernel initializes the working buffer (h) to 0 (for GPU2)
static __global__ void _kernel_6(float *buf_h_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_6_wrapper(float *buf_h_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_6), dim3(blocks), dim3(threads), 0, 0, buf_h_gpu2);
};
return 0;
};
// This kernel initializes the working buffer (c) for short term memories to 0 (for GPU2)
static __global__ void _kernel_7(float *buf_c_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_7_wrapper(float *buf_c_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_7), dim3(blocks), dim3(threads), 0, 0, buf_c_gpu2);
};
return 0;
};
// This kernel initializes the second working buffer (h2) to 0 (for GPU2)
static __global__ void _kernel_8(float *buf_h2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h2_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_8_wrapper(float *buf_h2_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_8), dim3(blocks), dim3(threads), 0, 0, buf_h2_gpu2);
};
return 0;
};
// This kernel initializes the second working buffer (c2) for short term memories to 0 (for GPU2)
static __global__ void _kernel_9(float *buf_c2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_c2_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_9_wrapper(float *buf_c2_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_9), dim3(blocks), dim3(threads), 0, 0, buf_c2_gpu2);
};
return 0;
};
// This kernel transposes the second weights matrix (for GPU3)
static __global__ void _kernel_10(float *buf_weights2_gpu3, float *buf_weights2_T_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights2_T_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * 4 * FEATURE_SIZE))) + (c11 * ((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights2_gpu3[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_10_wrapper(float *buf_weights2_gpu3, float *buf_weights2_T_gpu3)
{
{
dim3 blocks(4 * FEATURE_SIZE / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_10), dim3(blocks), dim3(threads), 0, 0, buf_weights2_gpu3, buf_weights2_T_gpu3);
};
return 0;
};
// This kernel initializes the second working buffer (h2) to 0 (for GPU3)
static __global__ void _kernel_11(float *buf_h2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_11_wrapper(float *buf_h2_gpu3)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_11), dim3(blocks), dim3(threads), 0, 0, buf_h2_gpu3);
};
return 0;
};
// This kernel initializes the second working buffer (c2) for short term memories to 0 (for GPU3)
static __global__ void _kernel_12(float *buf_c2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_12_wrapper(float *buf_c2_gpu3)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_12), dim3(blocks), dim3(threads), 0, 0, buf_c2_gpu3);
};
return 0;
};
// Performs gate calculations for GPU1
static __global__ void _kernel_13(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu1, float *buf_c_gpu1, float *buf_h_gpu1, float *buf_tmp_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + (((32 * __bx__) + __tx__) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = ((buf_tmp_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]));
buf_h_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = (tanh(buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]) * buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_13_wrapper(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu1, float *buf_c_gpu1, float *buf_h_gpu1, float *buf_tmp_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
hipLaunchKernelGGL(( _kernel_13), dim3(blocks), dim3(threads), 0, 0, c1, c3, c5, c7, buf_biases_gpu1, buf_c_gpu1, buf_h_gpu1, buf_tmp_gpu1);
};
return 0;
};
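// For readability, a sketch of the standard LSTM cell update that the flattened
// indexing in _kernel_13/_kernel_14/_kernel_16/_kernel_17 implements per element
// (the gate names are assumptions inferred from which slice multiplies what; only
// the index arithmetic above is authoritative). With F = FEATURE_SIZE, j the
// feature index, and tmp holding the pre-activation GEMM result for one timestep:
//   i = sigmoid(tmp[0*F + j] + bias[0*F + j])   // written back into tmp[0*F + j]
//   f = sigmoid(tmp[1*F + j] + bias[1*F + j])   // written back into tmp[1*F + j]
//   g = tanh   (tmp[2*F + j] + bias[2*F + j])   // written back into tmp[2*F + j]
//   o = sigmoid(tmp[3*F + j] + bias[3*F + j])   // written back into tmp[3*F + j]
//   c[t] = i * g + f * c[t-1]
//   h[t] = o * tanh(c[t])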
// Performs gate calculations for the first 4 layers (for GPU2)
static __global__ void _kernel_14(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + (((32 * __bx__) + __tx__) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = ((buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]));
buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = (tanh(buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]) * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_14_wrapper(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
hipLaunchKernelGGL(( _kernel_14), dim3(blocks), dim3(threads), 0, 0, c1, c3, c5, c7, buf_biases_gpu2, buf_c_gpu2, buf_h_gpu2, buf_tmp_gpu2);
};
return 0;
};
// Performs MAXIMUM POOLING (on GPU2)
static __global__ void _kernel_15(int32_t c1, int32_t c3, float *buf_h_gpu2, float *buf_h2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c13 = 0; (c13 < GEMM_BATCH); (c13 += 1))
{
buf_h2_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((GEMM_BATCH * c1) - (GEMM_BATCH * c3)) + c13) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (0 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = max(buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((((2 * GEMM_BATCH) * c1) - ((2 * GEMM_BATCH) * c3)) + (2 * c13)) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (4 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))], buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((((2 * GEMM_BATCH) * c1) - ((2 * GEMM_BATCH) * c3)) + (2 * c13)) + 2) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (4 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]);
};
};
extern "C" int32_t* _kernel_15_wrapper(int32_t c1, int32_t c3, float *buf_h_gpu2, float *buf_h2_gpu2)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
hipLaunchKernelGGL(( _kernel_15), dim3(blocks), dim3(threads), 0, 0, c1, c3, buf_h_gpu2, buf_h2_gpu2);
};
return 0;
};
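// In plainer terms, _kernel_15 is temporal max pooling with window 2 and stride 2
// over the hidden states of the fourth LSTM layer (a sketch; indices inferred from
// the code above, where slice 4 of h is read and slice 0 of h2 is written):
//   h2[slice 0][t + 1] = max(h[slice 4][2*t + 1], h[slice 4][2*t + 2])   for t in [0, SEQ_LENGTH/2)
// The +1/+2 offsets come from timestep 0 of each buffer being reserved for the initial state.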
// Performs gate calculations for the last 4 layers (for GPU2)
static __global__ void _kernel_16(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + (((32 * __bx__) + __tx__) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = ((buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]));
buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 4) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = (tanh(buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]) * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_16_wrapper(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
hipLaunchKernelGGL(( _kernel_16), dim3(blocks), dim3(threads), 0, 0, c1, c3, c5, buf_biases2_gpu2, buf_c_gpu2, buf_h_gpu2, buf_tmp_gpu2);
};
return 0;
};
// Performs gate calculations for the last 4 LSTM layers (for GPU3)
static __global__ void _kernel_17(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu3, float *buf_c2_gpu3, float *buf_h2_gpu3, float *buf_tmp2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp2_gpu3[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp2_gpu3[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + (((32 * __bx__) + __tx__) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = ((buf_tmp2_gpu3[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]));
buf_h2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 4) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = (tanh(buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]) * buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_17_wrapper(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu3, float *buf_c2_gpu3, float *buf_h2_gpu3, float *buf_tmp2_gpu3)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
hipLaunchKernelGGL(( _kernel_17), dim3(blocks), dim3(threads), 0, 0, c1, c3, c5, buf_biases2_gpu3, buf_c2_gpu3, buf_h2_gpu3, buf_tmp2_gpu3);
};
return 0;
};
// Copies the result from the h2 GPU3 buffer to the output buffer
static __global__ void _kernel_18(float *_C452_b69, float *buf_h2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < SEQ_LENGTH/2); (c9 += 1))
{
_C452_b69[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (FEATURE_SIZE - 0)))) + (c9 * ((1 * (FEATURE_SIZE - 0)) * (BATCH_SIZE - 0))))] = buf_h2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((c9 + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((NUM_LAYERS) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))];
};
};
extern "C" int32_t* _kernel_18_wrapper(float *_C452_b69, float *buf_h2_gpu3)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
hipLaunchKernelGGL(( _kernel_18), dim3(blocks), dim3(threads), 0, 0, _C452_b69, buf_h2_gpu3);
};
return 0;
}
|
570cbaae2b3b1043cbbc1f8ff372d4516b22044c.cu
|
#include <stdint.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "configuration.h"
/**
This file contains the needed kernel functions for the 4LSTM-MaxPool-4LSTM Multi GPU block (3 GPUs)
Note that each CUDA kernel function has a wrapper function that just calls it
_kernel_x is called in _kernel_x_wrapper
**/
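// A minimal host-side usage sketch (hypothetical; the driver code that allocates the
// buffers, runs the GEMMs and sequences these wrappers lives outside this file, and
// the buffer size below is only inferred from the indexing in _kernel_0):
//   float *d_w, *d_wT;   // per-GPU weight buffers, assumed already filled / allocated
//   cudaSetDevice(0);
//   cudaMalloc(&d_w,  (size_t)NUM_LAYERS * 2 * FEATURE_SIZE * 4 * FEATURE_SIZE * sizeof(float));
//   cudaMalloc(&d_wT, (size_t)NUM_LAYERS * 2 * FEATURE_SIZE * 4 * FEATURE_SIZE * sizeof(float));
//   // ... copy the trained weights into d_w ...
//   _kernel_0_wrapper(d_w, d_wT);   // transpose once before entering the time loop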
// This kernel transposes the first weights matrix (for GPU1)
static __global__ void _kernel_0(float *buf_weights_gpu1, float *buf_weights_T_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights_T_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (c11 * ((1 * (4 * FEATURE_SIZE)) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights_gpu1[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_0_wrapper(float *buf_weights_gpu1, float *buf_weights_T_gpu1)
{
{
dim3 blocks((4 * FEATURE_SIZE) / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_0<<<blocks, threads>>>(buf_weights_gpu1, buf_weights_T_gpu1);
};
return 0;
};
// This kernel initializes the working buffer (h) to 0 (for GPU1)
static __global__ void _kernel_1(float *buf_h_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_1_wrapper(float *buf_h_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE/32, 1);
dim3 threads(32, 32, 1);
_kernel_1<<<blocks, threads>>>(buf_h_gpu1);
};
return 0;
};
// This kernel initializes the working buffer (c) for short term memories to 0 (for GPU1)
static __global__ void _kernel_2(float *buf_c_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 <= 3); (c9 += 1))
{
buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_2_wrapper(float *buf_c_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_2<<<blocks, threads>>>(buf_c_gpu1);
};
return 0;
};
// This kernel copies the input to the working buffer (for GPU1)
static __global__ void _kernel_3(float *buf_h_gpu1, float *buf_x_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < SEQ_LENGTH/2); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_h_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((2 * c9) + c11) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (0 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = buf_x_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((2 * c9) + c11) * ((1 * FEATURE_SIZE) * BATCH_SIZE)))];
};
};
};
extern "C" int32_t* _kernel_3_wrapper(float *buf_h_gpu1, float *buf_x_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_3<<<blocks, threads>>>(buf_h_gpu1, buf_x_gpu1);
};
return 0;
};
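// Note: _kernel_3 writes input timestep t into slice 0, timestep t + 1 of the working
// buffer h; timestep 0 of each slice is reserved for the zeroed initial state set up
// by _kernel_1 and _kernel_2.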
// This kernel transposes the first weights matrix (for GPU2)
static __global__ void _kernel_4(float *buf_weights_gpu2, float *buf_weights_T_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights_T_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * 4 * FEATURE_SIZE))) + (c11 * ((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights_gpu2[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_4_wrapper(float *buf_weights_gpu2, float *buf_weights_T_gpu2)
{
{
dim3 blocks(4 * FEATURE_SIZE / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_4<<<blocks, threads>>>(buf_weights_gpu2, buf_weights_T_gpu2);
};
return 0;
};
// This kernel transposes the second weights matrix (for GPU2)
static __global__ void _kernel_5(float *buf_weights2_T_gpu2, float *buf_weights2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights2_T_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * 4 * FEATURE_SIZE))) + (c11 * ((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights2_gpu2[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_5_wrapper(float *buf_weights2_T_gpu2, float *buf_weights2_gpu2)
{
{
dim3 blocks(4 * FEATURE_SIZE / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_5<<<blocks, threads>>>(buf_weights2_T_gpu2, buf_weights2_gpu2);
};
return 0;
};
// This kernel initializes the working buffer (h) to 0 (for GPU2)
static __global__ void _kernel_6(float *buf_h_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_6_wrapper(float *buf_h_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_6<<<blocks, threads>>>(buf_h_gpu2);
};
return 0;
};
// This kernel initializes the working buffer (c) for short term memories to 0 (for GPU2)
static __global__ void _kernel_7(float *buf_c_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_7_wrapper(float *buf_c_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_7<<<blocks, threads>>>(buf_c_gpu2);
};
return 0;
};
// This kernel initializes the second working buffer (h2) to 0 (for GPU2)
static __global__ void _kernel_8(float *buf_h2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h2_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_8_wrapper(float *buf_h2_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_8<<<blocks, threads>>>(buf_h2_gpu2);
};
return 0;
};
// This kernel initializes the second working buffer (c2) for short term memories to 0 (for GPU2)
static __global__ void _kernel_9(float *buf_c2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_c2_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_9_wrapper(float *buf_c2_gpu2)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_9<<<blocks, threads>>>(buf_c2_gpu2);
};
return 0;
};
// This kernel transposes the second weights matrix (for GPU3)
static __global__ void _kernel_10(float *buf_weights2_gpu3, float *buf_weights2_T_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
for (int32_t c11 = 0; (c11 <= 1); (c11 += 1))
{
buf_weights2_T_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * 4 * FEATURE_SIZE))) + (c11 * ((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE))) + (c9 * (((1 * 4 * FEATURE_SIZE) * FEATURE_SIZE) * 2)))] = buf_weights2_gpu3[((((0 + (((32 * __by__) + __ty__) * 1)) + (((32 * __bx__) + __tx__) * (1 * FEATURE_SIZE))) + (c11 * ((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * 4 * FEATURE_SIZE) * 2)))];
};
};
};
extern "C" int32_t* _kernel_10_wrapper(float *buf_weights2_gpu3, float *buf_weights2_T_gpu3)
{
{
dim3 blocks(4 * FEATURE_SIZE / 32, FEATURE_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_10<<<blocks, threads>>>(buf_weights2_gpu3, buf_weights2_T_gpu3);
};
return 0;
};
// This kernel initializes the second working buffer (h2) to 0 (for GPU3)
static __global__ void _kernel_11(float *buf_h2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_h2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c9 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_11_wrapper(float *buf_h2_gpu3)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_11<<<blocks, threads>>>(buf_h2_gpu3);
};
return 0;
};
// This kernel initializes the second working buffer (c2) for short term memories to 0 (for GPU3)
static __global__ void _kernel_12(float *buf_c2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < NUM_LAYERS); (c9 += 1))
{
buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (0 * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c9 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = 0;
};
};
extern "C" int32_t* _kernel_12_wrapper(float *buf_c2_gpu3)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_12<<<blocks, threads>>>(buf_c2_gpu3);
};
return 0;
};
// Performs gate calculations for GPU1
static __global__ void _kernel_13(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu1, float *buf_c_gpu1, float *buf_h_gpu1, float *buf_tmp_gpu1)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + (((32 * __bx__) + __tx__) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu1[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = ((buf_tmp_gpu1[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]));
buf_h_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = (tanh(buf_c_gpu1[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]) * buf_tmp_gpu1[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_13_wrapper(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu1, float *buf_c_gpu1, float *buf_h_gpu1, float *buf_tmp_gpu1)
{
{
dim3 blocks(FEATURE_SIZE / 32, BATCH_SIZE / 32, 1);
dim3 threads(32, 32, 1);
_kernel_13<<<blocks, threads>>>(c1, c3, c5, c7, buf_biases_gpu1, buf_c_gpu1, buf_h_gpu1, buf_tmp_gpu1);
};
return 0;
};
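// For readability, a sketch of the standard LSTM cell update that the flattened
// indexing in _kernel_13/_kernel_14/_kernel_16/_kernel_17 implements per element
// (the gate names are assumptions inferred from which slice multiplies what; only
// the index arithmetic above is authoritative). With F = FEATURE_SIZE, j the
// feature index, and tmp holding the pre-activation GEMM result for one timestep:
//   i = sigmoid(tmp[0*F + j] + bias[0*F + j])   // written back into tmp[0*F + j]
//   f = sigmoid(tmp[1*F + j] + bias[1*F + j])   // written back into tmp[1*F + j]
//   g = tanh   (tmp[2*F + j] + bias[2*F + j])   // written back into tmp[2*F + j]
//   o = sigmoid(tmp[3*F + j] + bias[3*F + j])   // written back into tmp[3*F + j]
//   c[t] = i * g + f * c[t-1]
//   h[t] = o * tanh(c[t])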
// Performs gate calculations for the first 4 layers (for GPU2)
static __global__ void _kernel_14(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + (((32 * __bx__) + __tx__) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases_gpu2[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (c3 * (1 * (4 * FEATURE_SIZE))))]))));
buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = ((buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]));
buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 + 1) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))] = (tanh(buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (c3 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]) * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + ((((c1 - c3) * 2 + c5) * GEMM_BATCH + c7) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_14_wrapper(int32_t c1, int32_t c3, int32_t c5, int32_t c7, float *buf_biases_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
_kernel_14<<<blocks, threads>>>(c1, c3, c5, c7, buf_biases_gpu2, buf_c_gpu2, buf_h_gpu2, buf_tmp_gpu2);
};
return 0;
};
// Performs MAXIMUM POOLING (on GPU2)
static __global__ void _kernel_15(int32_t c1, int32_t c3, float *buf_h_gpu2, float *buf_h2_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c13 = 0; (c13 < GEMM_BATCH); (c13 += 1))
{
buf_h2_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((((GEMM_BATCH * c1) - (GEMM_BATCH * c3)) + c13) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (0 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = max(buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((((2 * GEMM_BATCH) * c1) - ((2 * GEMM_BATCH) * c3)) + (2 * c13)) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (4 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))], buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((((2 * GEMM_BATCH) * c1) - ((2 * GEMM_BATCH) * c3)) + (2 * c13)) + 2) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + (4 * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH + 1))))]);
};
};
extern "C" int32_t* _kernel_15_wrapper(int32_t c1, int32_t c3, float *buf_h_gpu2, float *buf_h2_gpu2)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
_kernel_15<<<blocks, threads>>>(c1, c3, buf_h_gpu2, buf_h2_gpu2);
};
return 0;
};
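// In plainer terms, _kernel_15 is temporal max pooling with window 2 and stride 2
// over the hidden states of the fourth LSTM layer (a sketch; indices inferred from
// the code above, where slice 4 of h is read and slice 0 of h2 is written):
//   h2[slice 0][t + 1] = max(h[slice 4][2*t + 1], h[slice 4][2*t + 2])   for t in [0, SEQ_LENGTH/2)
// The +1/+2 offsets come from timestep 0 of each buffer being reserved for the initial state.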
// Performs gate calculations for the last 4 layers (for GPU2)
static __global__ void _kernel_16(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + (((32 * __bx__) + __tx__) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu2[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = ((buf_tmp_gpu2[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]));
buf_h_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 4) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = (tanh(buf_c_gpu2[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]) * buf_tmp_gpu2[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_16_wrapper(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu2, float *buf_c_gpu2, float *buf_h_gpu2, float *buf_tmp_gpu2)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
_kernel_16<<<blocks, threads>>>(c1, c3, c5, buf_biases2_gpu2, buf_c_gpu2, buf_h_gpu2, buf_tmp_gpu2);
};
return 0;
};
// Performs gate calculations for the last 4 LSTM layers (for GPU3)
static __global__ void _kernel_17(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu3, float *buf_c2_gpu3, float *buf_h2_gpu3, float *buf_tmp2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
buf_tmp2_gpu3[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp2_gpu3[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + (((32 * __bx__) + __tx__) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = tanh((buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]));
buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] = (1 / (1 + exp(-(buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] + buf_biases2_gpu3[((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + ((c3 - 5) * (1 * (4 * FEATURE_SIZE))))]))));
buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = ((buf_tmp2_gpu3[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (2 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]) + (buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + FEATURE_SIZE) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))] * buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]));
buf_h2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 4) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))] = (tanh(buf_c2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((((c1 - c3) * GEMM_BATCH + c5) + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((c3 - 5) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))]) * buf_tmp2_gpu3[(((0 + ((((32 * __bx__) + __tx__) + (3 * FEATURE_SIZE)) * 1)) + (((32 * __by__) + __ty__) * (1 * (4 * FEATURE_SIZE)))) + (((c1 - c3) * GEMM_BATCH + c5) * ((1 * (4 * FEATURE_SIZE)) * BATCH_SIZE)))]);
};
extern "C" int32_t* _kernel_17_wrapper(int32_t c1, int32_t c3, int32_t c5, float *buf_biases2_gpu3, float *buf_c2_gpu3, float *buf_h2_gpu3, float *buf_tmp2_gpu3)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
_kernel_17<<<blocks, threads>>>(c1, c3, c5, buf_biases2_gpu3, buf_c2_gpu3, buf_h2_gpu3, buf_tmp2_gpu3);
};
return 0;
};
// Copies the result from the h2 GPU3 buffer to the output buffer
static __global__ void _kernel_18(float *_C452_b69, float *buf_h2_gpu3)
{
const int32_t __bx__ = (blockIdx.x + 0);
const int32_t __by__ = (blockIdx.y + 0);
const int32_t __tx__ = (threadIdx.x + 0);
const int32_t __ty__ = (threadIdx.y + 0);
for (int32_t c9 = 0; (c9 < SEQ_LENGTH/2); (c9 += 1))
{
_C452_b69[(((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * (FEATURE_SIZE - 0)))) + (c9 * ((1 * (FEATURE_SIZE - 0)) * (BATCH_SIZE - 0))))] = buf_h2_gpu3[((((0 + (((32 * __bx__) + __tx__) * 1)) + (((32 * __by__) + __ty__) * (1 * FEATURE_SIZE))) + ((c9 + 1) * ((1 * FEATURE_SIZE) * BATCH_SIZE))) + ((NUM_LAYERS) * (((1 * FEATURE_SIZE) * BATCH_SIZE) * (SEQ_LENGTH/2 + 1))))];
};
};
extern "C" int32_t* _kernel_18_wrapper(float *_C452_b69, float *buf_h2_gpu3)
{
{
dim3 blocks((15 + 1), (1 + 1), 1);
dim3 threads((31 + 1), (31 + 1), 1);
_kernel_18<<<blocks, threads>>>(_C452_b69, buf_h2_gpu3);
};
return 0;
}
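// Editor's note: illustrative sketch only, not part of the generated code. The flattened
// index arithmetic in _kernel_17 implements a standard LSTM cell update over four gate
// slices of buf_tmp2 (each FEATURE_SIZE wide, with the bias added before the activation).
// Per element, with hypothetical pre-activation values i_pre, f_pre, g_pre, o_pre, the
// math reduces to:
static __device__ __forceinline__ void lstm_cell_update_sketch(float i_pre, float f_pre,
                                                               float g_pre, float o_pre,
                                                               float c_prev,
                                                               float *c_next, float *h_next)
{
    const float i_gate = 1.0f / (1.0f + expf(-i_pre)); // input gate (slice 0), sigmoid
    const float f_gate = 1.0f / (1.0f + expf(-f_pre)); // forget gate (slice 1), sigmoid
    const float g_cand = tanhf(g_pre);                 // candidate cell value (slice 2), tanh
    const float o_gate = 1.0f / (1.0f + expf(-o_pre)); // output gate (slice 3), sigmoid
    *c_next = i_gate * g_cand + f_gate * c_prev;       // c_t = i * g + f * c_{t-1}
    *h_next = tanhf(*c_next) * o_gate;                 // h_t = tanh(c_t) * o
}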
|
query_cross_entropy.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "query_cross_entropy.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <contrib/libs/cub/cub/util_ptx.cuh>
#include <cassert>
#include <cstdio>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): multiple docs per thread to reduce sync overhead
template <int BlockSize, bool IsSingleClassBlock>
__forceinline__ __device__ void QueryCrossEntropySingleBlockImpl(const float alpha,
const float* targets,
const float* weights,
const float* values,
const int offset,
const int size,
const int* qids,
const ui32* qOffsets,
const bool* isSingleClassFlags,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
__shared__ float sharedDer[BlockSize];
__shared__ float sharedDer2[BlockSize];
isSingleClassFlags += offset;
qids += offset;
if (ders) {
ders += offset;
}
if (ders2llp) {
ders2llp += offset;
}
if (ders2llmax) {
ders2llmax += offset;
}
const float MAX_SHIFT = 20;
const int tid = threadIdx.x;
const int loadIdx = tid < size ? offset + tid : 0;
const bool isSingleClass = tid < size ? isSingleClassFlags[tid] : true;
const int tidQid = tid < size ? Ldg(qids + tid) : -1;
const ui32 queryOffset = tid < size ? Ldg(qOffsets + tidQid) : 0;
const int querySize = tid < size ? Ldg(qOffsets + tidQid + 1) - queryOffset : 0;
const int localIdx = tid < size ? offset + tid - queryOffset : 0;
const float clazz = tid < size ? Ldg(targets + loadIdx) : 0;
const float cursor = tid < size ? Ldg(values + loadIdx) : 0;
const float w = tid < size ? Ldg(weights + loadIdx) : 0;
float left = -MAX_SHIFT;
float right = MAX_SHIFT;
float bestShift = (left + right) / 2;
int reduceSize = 0;
if (!IsSingleClassBlock) {
{
sharedDer[tid] = querySize;
__syncthreads();
for (int s = BlockSize >> 1; s > 0; s >>= 1) {
if (tid < s) {
sharedDer[tid] = max(sharedDer[tid], sharedDer[tid + s]);
}
__syncthreads();
}
reduceSize = (1 << int(ceil(log2(sharedDer[0])) - 1));
__syncthreads();
}
float midDer = 0;
#pragma unroll
for (int i = 0; i < 8; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb((isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f));
sharedDer[tid] = w * (clazz - p);
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
}
__syncthreads();
}
midDer = sharedDer[tid - localIdx];
if (midDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift = (left + right) / 2;
__syncthreads();
}
#pragma unroll
for (int i = 0; i < 5; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb(isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f);
__syncthreads();
sharedDer[tid] = w * (clazz - p);
sharedDer2[tid] = w * (1.0f - p) * p;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
float currentDer = sharedDer[tid - localIdx];
if (currentDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift += currentDer / (sharedDer2[tid - localIdx] + 1e-9f);
if (bestShift > right) {
bestShift = 0.1f * left + 0.9f * right;
}
if (bestShift < left) {
bestShift = 0.9f * left + 0.1f * right;
}
__syncthreads();
}
}
const float shiftedApprox = cursor + bestShift;
const float expVal = __expf(cursor);
const float expShiftedVal = __expf(shiftedApprox);
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1.0f + expVal) : cursor;
const float llp = (tid < size) ? (clazz * cursor - logExpValPlusOne) : 0;
const float logExpValPlusOneShifted = isfinite(expShiftedVal) ? __logf(1.0f + expShiftedVal) : shiftedApprox;
const float llmax = (tid < size) ? (clazz * shiftedApprox - logExpValPlusOneShifted) : 0;
const float docScore = (1.0f - alpha) * llp + (isSingleClass ? 0 : alpha * llmax);
sharedDer[tid] = w * docScore;
__syncthreads();
float blockScore = FastInBlockReduce(tid, sharedDer, BlockSize);
if (tid == 0) {
atomicAdd(functionValue, blockScore);
}
}
const float prob = ClipProb(isfinite(expVal + 1.0f) ? expVal / (1.0f + expVal) : 1.0f);
const float shiftedProb = ClipProb(isfinite(expShiftedVal + 1.0f) ? expShiftedVal / (1.0f + expShiftedVal) : 1.0f);
if (ders && (tid < size)) {
const float derllp = clazz - prob;
const float derllmax = isSingleClass ? 0 : clazz - shiftedProb;
ders[tid] = w * ((1.0f - alpha) * derllp + alpha * derllmax);
}
if (ders2llp && (tid < size)) {
ders2llp[tid] = w * (1.0f - alpha) * prob * (1.0f - prob);
}
float der2llmax = isSingleClass ? 0 : w * alpha * shiftedProb * (1.0f - shiftedProb);
if (ders2llmax && (tid < size)) {
ders2llmax[tid] = der2llmax;
}
if (groupDers2) {
float groupDer2 = 0;
if (!IsSingleClassBlock) {
__syncthreads();
sharedDer2[tid] = der2llmax;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
if (localIdx == 0 && tid < size) {
groupDer2 = sharedDer2[tid - localIdx];
}
}
if (localIdx == 0 && tid < size) {
groupDers2[tidQid] = groupDer2;
}
}
}
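    // Editor's note: illustrative scalar sketch with assumed names, not part of the library.
    // The block-wide search above picks, per query, the shift that zeroes the summed derivative
    // of the shifted log-likelihood: roughly 8 bisection steps on [-20, 20] followed by Newton
    // steps clamped back into the current bracket.
    __device__ __forceinline__ float BestShiftScalarSketch(const float* approx, const float* target,
                                                           const float* weight, int n) {
        float left = -20.0f, right = 20.0f, shift = 0.0f;
        for (int it = 0; it < 13; ++it) {
            float der = 0.0f, der2 = 0.0f;
            for (int i = 0; i < n; ++i) {
                const float e = __expf(approx[i] + shift);
                const float p = isfinite(1.0f + e) ? e / (1.0f + e) : 1.0f;
                der += weight[i] * (target[i] - p);
                der2 += weight[i] * p * (1.0f - p);
            }
            if (der > 0) {
                left = shift;
            } else {
                right = shift;
            }
            shift = (it < 8) ? 0.5f * (left + right)          // bisection phase
                             : shift + der / (der2 + 1e-9f);  // Newton phase
            if (shift > right) {
                shift = 0.1f * left + 0.9f * right;
            }
            if (shift < left) {
                shift = 0.9f * left + 0.1f * right;
            }
        }
        return shift;
    }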
template <int BlockSize>
__global__ void QueryCrossEntropyImpl(volatile int* qidCursor,
const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const int* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int size,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
while (true) {
int taskQid = 0;
int offset = 0;
int nextTaskOffset = 0;
{
__shared__ int sharedTaskQid;
__shared__ int sharedTaskOffset;
__shared__ int sharedNextTaskOffset;
if (threadIdx.x == 0) {
taskQid = qidCursor[0];
while (true) {
if (taskQid >= qCount) {
break;
}
offset = qOffsets[taskQid];
nextTaskOffset = min(offset + BlockSize, size);
int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount;
int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid);
if (oldQid == taskQid) {
nextTaskOffset = qOffsets[nextTaskQid];
break;
} else {
taskQid = oldQid;
}
}
}
if (threadIdx.x == 0) {
sharedTaskQid = taskQid;
sharedTaskOffset = offset;
sharedNextTaskOffset = nextTaskOffset;
}
__syncthreads();
taskQid = sharedTaskQid;
offset = sharedTaskOffset;
nextTaskOffset = sharedNextTaskOffset;
__syncthreads();
}
if (taskQid >= qCount) {
return;
}
const int blockSize = nextTaskOffset - offset;
//we assume that docs are sorted by the isSingleClass mask,
//otherwise this will be slower for adv-pools:
//first part - queries with pairs
//second part - all other queries
bool isSingleClassBlock = threadIdx.x < blockSize ? Ldg(isSingleClassQueries + offset + threadIdx.x) : true;
{
__shared__ float sharedFlags[BlockSize];
sharedFlags[threadIdx.x] = isSingleClassBlock ? 1.0f : 0.0f;
using TOp = TCudaMultiply<float>;
float tmp = FastInBlockReduce<float, TOp>(threadIdx.x, sharedFlags, BlockSize);
if (threadIdx.x == 0) {
sharedFlags[0] = tmp;
}
__syncthreads();
isSingleClassBlock = sharedFlags[0] > 0;
__syncthreads();
}
#define COMPUTE_SINGLE_GROUP(IsSingleClassQuery) \
QueryCrossEntropySingleBlockImpl<BlockSize, IsSingleClassQuery>(alpha, \
targets, weights, values,\
offset, blockSize,\
qids, qOffsets,\
isSingleClassQueries,\
functionValue,\
ders,\
ders2llp,\
ders2llmax,\
groupDers2);
if (isSingleClassBlock) {
COMPUTE_SINGLE_GROUP(true);
} else {
COMPUTE_SINGLE_GROUP(false);
}
__syncthreads();
}
}
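    // Editor's note: this kernel uses a persistent-block, work-stealing launch. Each block loops
    // until all qCount queries are processed; thread 0 advances the global qidCursor with
    // atomicCAS, claiming a contiguous run of whole queries (covering at most BlockSize documents)
    // per iteration, so the grid can stay fixed at maxBlocksPerSm * SMCount in the host launcher.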
void QueryCrossEntropy(int* qidCursor, const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const ui32* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int docCount,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2,
TCudaStream stream)
{
const ui32 maxBlocksPerSm = 4;
const ui32 smCount = TArchProps::SMCount();
const int blockSize = 256;
FillBuffer(qidCursor, 0, 1, stream);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
hipLaunchKernelGGL(( QueryCrossEntropyImpl<blockSize>) , dim3(maxBlocksPerSm * smCount), dim3(blockSize), 0, stream, qidCursor, qCount, alpha,
targets, weights, values,
(int*)qids, isSingleClassQueries, qOffsets,
docCount,
functionValue,
ders, ders2llp, ders2llmax, groupDers2);
}
__global__ void ComputeQueryLogitMatrixSizesImpl(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSizes) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const bool isSingleClassFlag = i < qCount ? Ldg(isSingleClassQuery + queryOffsets[i]) : true;
const ui32 qSize = (i < qCount && !isSingleClassFlag) ? queryOffsets[i + 1] - queryOffsets[i] : 0;
if (i <= qCount) {
matrixSizes[i] = qSize * (qSize - 1) / 2;
}
}
void ComputeQueryLogitMatrixSizes(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSize,
TCudaStream stream) {
const ui32 blockSize = 256;
//matrix count is qCount + 1 (for last index)
const ui32 numBlocks = (qCount + blockSize) / blockSize;
hipLaunchKernelGGL(( ComputeQueryLogitMatrixSizesImpl), dim3(numBlocks), dim3(blockSize), 0, stream, queryOffsets, isSingleClassQuery, qCount, matrixSize);
}
template <int BlockSize, int ThreadsPerQuery>
__global__ void MakePairsQueryLogitImpl(const ui32* queryOffsets,
const ui32* matrixOffsets,
const bool* isSingleClassQuery,
ui32 queryCount,
uint2* pairs) {
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
ui32 queryOffset = qid < queryCount ? queryOffsets[qid] : 0;
const bool singleClassFlag = qid < queryCount ? isSingleClassQuery[queryOffset] : true;
ui32 querySize = (qid < queryCount && !singleClassFlag) ? queryOffsets[qid + 1] - queryOffset : 0;
ui32 matrixOffset = qid < queryCount ? matrixOffsets[qid] : 0;
const int x = threadIdx.x & (ThreadsPerQuery - 1);
const ui32 matrixSize = querySize * (querySize - 1) / 2;
pairs += matrixOffset;
for (int i = x; i < matrixSize; i += ThreadsPerQuery) {
uint2 pair = GetPair(i);
pair.x += queryOffset;
pair.y += queryOffset;
pairs[i] = pair;
}
}
void MakeQueryLogitPairs(const ui32* qOffsets,
const ui32* matrixOffset,
const bool* isSingleFlags,
double meanQuerySize,
ui32 qCount,
uint2* pairs,
TCudaStream stream) {
const int blockSize = 128;
#define MAKE_PAIRS(threadsPerQuery) \
const int numBlocks = (qCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
hipLaunchKernelGGL(( MakePairsQueryLogitImpl<blockSize, threadsPerQuery>) , dim3(numBlocks), dim3(blockSize), 0, stream , qOffsets, matrixOffset, isSingleFlags, qCount, pairs); \
}
if (meanQuerySize < 4) {
MAKE_PAIRS(4)
} else if (meanQuerySize < 8) {
MAKE_PAIRS(8)
} else if (meanQuerySize < 16) {
MAKE_PAIRS(16)
} else {
MAKE_PAIRS(32)
}
#undef MAKE_PAIRS
}
template <int BlockSize, int ThreadsPerQuery>
__global__ void MakeIsSingleClassFlagsImpl(const int* queryOffsets, int queryCount,
const ui32* loadIndices, const float* targets,
bool* isSingleClassQuery) {
int bias = queryCount ? Ldg(queryOffsets) : 0;
auto workingTile = tiled_partition<ThreadsPerQuery>(this_thread_block());
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ ui32 results[BlockSize];
const int queryOffset = (qid < queryCount) ? (queryOffsets[qid] - bias) : 0;
const int querySize = (qid < queryCount) ? (queryOffsets[qid + 1] - bias - queryOffset) : 0;
const ui32 firstIdx = qid < queryCount ? loadIndices[queryOffset] : 0;
float firstTarget = Ldg(targets + firstIdx);
int isSingleClass = 1;
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
const ui32 loadIdx = loadIndices[queryOffset + i];
float docTarget = Ldg(targets + loadIdx);
if (abs(firstTarget - docTarget) > 1e-5f) {
isSingleClass = 0;
}
}
using TOp = TCudaMultiply<int>;
isSingleClass = TileReduce<int, ThreadsPerQuery, TOp>(workingTile, isSingleClass);
if (workingTile.thread_rank() == 0) {
results[localQid] = isSingleClass;
workingTile.sync();
}
isSingleClass = results[localQid];
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
isSingleClassQuery[queryOffset + i] = isSingleClass == 1;
}
}
void MakeIsSingleClassFlags(const float* targets, const ui32* loadIndices,
const ui32* queryOffsets,
ui32 queryCount,
double meanQuerySize,
bool* isSingleClassQuery,
TCudaStream stream) {
const int blockSize = 128;
#define RUN_KERNEL(threadsPerQuery) \
const int numBlocks = (queryCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
hipLaunchKernelGGL(( MakeIsSingleClassFlagsImpl<blockSize, threadsPerQuery>) , dim3(numBlocks), dim3(blockSize), 0, stream , (int*)queryOffsets, queryCount, loadIndices, targets, isSingleClassQuery); \
}
if (meanQuerySize < 2) {
RUN_KERNEL(2)
} else if (meanQuerySize < 4) {
RUN_KERNEL(4)
} else if (meanQuerySize < 8) {
RUN_KERNEL(8)
} else if (meanQuerySize < 16) {
RUN_KERNEL(16)
} else {
RUN_KERNEL(32)
}
#undef RUN_KERNEL
}
//for stochastic gradient
__global__ void FillPairDer2AndRemapPairDocumentsImpl(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pair.x = Ldg(docIds + pair.x);
pair.y = Ldg(docIds + pair.y);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
pairs[i] = pair;
}
}
void FillPairDer2AndRemapPairDocuments(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( FillPairDer2AndRemapPairDocumentsImpl), dim3(numBlocks), dim3(blockSize),0, stream , ders2, groupDers2, docIds, qids, pairCount, pairDer2, pairs);
}
}
}
|
query_cross_entropy.cu
|
#include "query_cross_entropy.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <contrib/libs/cub/cub/util_ptx.cuh>
#include <cassert>
#include <cstdio>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): multiple docs per thread to reduce sync overhead
template <int BlockSize, bool IsSingleClassBlock>
__forceinline__ __device__ void QueryCrossEntropySingleBlockImpl(const float alpha,
const float* targets,
const float* weights,
const float* values,
const int offset,
const int size,
const int* qids,
const ui32* qOffsets,
const bool* isSingleClassFlags,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
__shared__ float sharedDer[BlockSize];
__shared__ float sharedDer2[BlockSize];
isSingleClassFlags += offset;
qids += offset;
if (ders) {
ders += offset;
}
if (ders2llp) {
ders2llp += offset;
}
if (ders2llmax) {
ders2llmax += offset;
}
const float MAX_SHIFT = 20;
const int tid = threadIdx.x;
const int loadIdx = tid < size ? offset + tid : 0;
const bool isSingleClass = tid < size ? isSingleClassFlags[tid] : true;
const int tidQid = tid < size ? Ldg(qids + tid) : -1;
const ui32 queryOffset = tid < size ? Ldg(qOffsets + tidQid) : 0;
const int querySize = tid < size ? Ldg(qOffsets + tidQid + 1) - queryOffset : 0;
const int localIdx = tid < size ? offset + tid - queryOffset : 0;
const float clazz = tid < size ? Ldg(targets + loadIdx) : 0;
const float cursor = tid < size ? Ldg(values + loadIdx) : 0;
const float w = tid < size ? Ldg(weights + loadIdx) : 0;
float left = -MAX_SHIFT;
float right = MAX_SHIFT;
float bestShift = (left + right) / 2;
int reduceSize = 0;
if (!IsSingleClassBlock) {
{
sharedDer[tid] = querySize;
__syncthreads();
for (int s = BlockSize >> 1; s > 0; s >>= 1) {
if (tid < s) {
sharedDer[tid] = max(sharedDer[tid], sharedDer[tid + s]);
}
__syncthreads();
}
reduceSize = (1 << int(ceil(log2(sharedDer[0])) - 1));
__syncthreads();
}
float midDer = 0;
#pragma unroll
for (int i = 0; i < 8; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb((isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f));
sharedDer[tid] = w * (clazz - p);
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
}
__syncthreads();
}
midDer = sharedDer[tid - localIdx];
if (midDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift = (left + right) / 2;
__syncthreads();
}
#pragma unroll
for (int i = 0; i < 5; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb(isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f);
__syncthreads();
sharedDer[tid] = w * (clazz - p);
sharedDer2[tid] = w * (1.0f - p) * p;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
float currentDer = sharedDer[tid - localIdx];
if (currentDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift += currentDer / (sharedDer2[tid - localIdx] + 1e-9f);
if (bestShift > right) {
bestShift = 0.1f * left + 0.9f * right;
}
if (bestShift < left) {
bestShift = 0.9f * left + 0.1f * right;
}
__syncthreads();
}
}
const float shiftedApprox = cursor + bestShift;
const float expVal = __expf(cursor);
const float expShiftedVal = __expf(shiftedApprox);
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1.0f + expVal) : cursor;
const float llp = (tid < size) ? (clazz * cursor - logExpValPlusOne) : 0;
const float logExpValPlusOneShifted = isfinite(expShiftedVal) ? __logf(1.0f + expShiftedVal) : shiftedApprox;
const float llmax = (tid < size) ? (clazz * shiftedApprox - logExpValPlusOneShifted) : 0;
const float docScore = (1.0f - alpha) * llp + (isSingleClass ? 0 : alpha * llmax);
sharedDer[tid] = w * docScore;
__syncthreads();
float blockScore = FastInBlockReduce(tid, sharedDer, BlockSize);
if (tid == 0) {
atomicAdd(functionValue, blockScore);
}
}
const float prob = ClipProb(isfinite(expVal + 1.0f) ? expVal / (1.0f + expVal) : 1.0f);
const float shiftedProb = ClipProb(isfinite(expShiftedVal + 1.0f) ? expShiftedVal / (1.0f + expShiftedVal) : 1.0f);
if (ders && (tid < size)) {
const float derllp = clazz - prob;
const float derllmax = isSingleClass ? 0 : clazz - shiftedProb;
ders[tid] = w * ((1.0f - alpha) * derllp + alpha * derllmax);
}
if (ders2llp && (tid < size)) {
ders2llp[tid] = w * (1.0f - alpha) * prob * (1.0f - prob);
}
float der2llmax = isSingleClass ? 0 : w * alpha * shiftedProb * (1.0f - shiftedProb);
if (ders2llmax && (tid < size)) {
ders2llmax[tid] = der2llmax;
}
if (groupDers2) {
float groupDer2 = 0;
if (!IsSingleClassBlock) {
__syncthreads();
sharedDer2[tid] = der2llmax;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
if (localIdx == 0 && tid < size) {
groupDer2 = sharedDer2[tid - localIdx];
}
}
if (localIdx == 0 && tid < size) {
groupDers2[tidQid] = groupDer2;
}
}
}
template <int BlockSize>
__global__ void QueryCrossEntropyImpl(volatile int* qidCursor,
const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const int* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int size,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
while (true) {
int taskQid = 0;
int offset = 0;
int nextTaskOffset = 0;
{
__shared__ int sharedTaskQid;
__shared__ int sharedTaskOffset;
__shared__ int sharedNextTaskOffset;
if (threadIdx.x == 0) {
taskQid = qidCursor[0];
while (true) {
if (taskQid >= qCount) {
break;
}
offset = qOffsets[taskQid];
nextTaskOffset = min(offset + BlockSize, size);
int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount;
int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid);
if (oldQid == taskQid) {
nextTaskOffset = qOffsets[nextTaskQid];
break;
} else {
taskQid = oldQid;
}
}
}
if (threadIdx.x == 0) {
sharedTaskQid = taskQid;
sharedTaskOffset = offset;
sharedNextTaskOffset = nextTaskOffset;
}
__syncthreads();
taskQid = sharedTaskQid;
offset = sharedTaskOffset;
nextTaskOffset = sharedNextTaskOffset;
__syncthreads();
}
if (taskQid >= qCount) {
return;
}
const int blockSize = nextTaskOffset - offset;
//we assume that docs are sorted by the isSingleClass mask,
//otherwise this will be slower for adv-pools:
//first part - queries with pairs
//second part - all other queries
bool isSingleClassBlock = threadIdx.x < blockSize ? Ldg(isSingleClassQueries + offset + threadIdx.x) : true;
{
__shared__ float sharedFlags[BlockSize];
sharedFlags[threadIdx.x] = isSingleClassBlock ? 1.0f : 0.0f;
using TOp = TCudaMultiply<float>;
float tmp = FastInBlockReduce<float, TOp>(threadIdx.x, sharedFlags, BlockSize);
if (threadIdx.x == 0) {
sharedFlags[0] = tmp;
}
__syncthreads();
isSingleClassBlock = sharedFlags[0] > 0;
__syncthreads();
}
#define COMPUTE_SINGLE_GROUP(IsSingleClassQuery) \
QueryCrossEntropySingleBlockImpl<BlockSize, IsSingleClassQuery>(alpha, \
targets, weights, values,\
offset, blockSize,\
qids, qOffsets,\
isSingleClassQueries,\
functionValue,\
ders,\
ders2llp,\
ders2llmax,\
groupDers2);
if (isSingleClassBlock) {
COMPUTE_SINGLE_GROUP(true);
} else {
COMPUTE_SINGLE_GROUP(false);
}
__syncthreads();
}
}
void QueryCrossEntropy(int* qidCursor, const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const ui32* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int docCount,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2,
TCudaStream stream)
{
const ui32 maxBlocksPerSm = 4;
const ui32 smCount = TArchProps::SMCount();
const int blockSize = 256;
FillBuffer(qidCursor, 0, 1, stream);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
QueryCrossEntropyImpl<blockSize> <<<maxBlocksPerSm * smCount, blockSize, 0, stream>>>(qidCursor, qCount, alpha,
targets, weights, values,
(int*)qids, isSingleClassQueries, qOffsets,
docCount,
functionValue,
ders, ders2llp, ders2llmax, groupDers2);
}
__global__ void ComputeQueryLogitMatrixSizesImpl(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSizes) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const bool isSingleClassFlag = i < qCount ? Ldg(isSingleClassQuery + queryOffsets[i]) : true;
const ui32 qSize = (i < qCount && !isSingleClassFlag) ? queryOffsets[i + 1] - queryOffsets[i] : 0;
if (i <= qCount) {
matrixSizes[i] = qSize * (qSize - 1) / 2;
}
}
void ComputeQueryLogitMatrixSizes(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSize,
TCudaStream stream) {
const ui32 blockSize = 256;
//matrix count is qCount + 1 (for last index)
const ui32 numBlocks = (qCount + blockSize) / blockSize;
ComputeQueryLogitMatrixSizesImpl<<<numBlocks, blockSize, 0, stream>>>(queryOffsets, isSingleClassQuery, qCount, matrixSize);
}
template <int BlockSize, int ThreadsPerQuery>
__global__ void MakePairsQueryLogitImpl(const ui32* queryOffsets,
const ui32* matrixOffsets,
const bool* isSingleClassQuery,
ui32 queryCount,
uint2* pairs) {
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
ui32 queryOffset = qid < queryCount ? queryOffsets[qid] : 0;
const bool singleClassFlag = qid < queryCount ? isSingleClassQuery[queryOffset] : true;
ui32 querySize = (qid < queryCount && !singleClassFlag) ? queryOffsets[qid + 1] - queryOffset : 0;
ui32 matrixOffset = qid < queryCount ? matrixOffsets[qid] : 0;
const int x = threadIdx.x & (ThreadsPerQuery - 1);
const ui32 matrixSize = querySize * (querySize - 1) / 2;
pairs += matrixOffset;
for (int i = x; i < matrixSize; i += ThreadsPerQuery) {
uint2 pair = GetPair(i);
pair.x += queryOffset;
pair.y += queryOffset;
pairs[i] = pair;
}
}
void MakeQueryLogitPairs(const ui32* qOffsets,
const ui32* matrixOffset,
const bool* isSingleFlags,
double meanQuerySize,
ui32 qCount,
uint2* pairs,
TCudaStream stream) {
const int blockSize = 128;
#define MAKE_PAIRS(threadsPerQuery) \
const int numBlocks = (qCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
MakePairsQueryLogitImpl<blockSize, threadsPerQuery> <<< numBlocks, blockSize, 0, stream >>> (qOffsets, matrixOffset, isSingleFlags, qCount, pairs); \
}
if (meanQuerySize < 4) {
MAKE_PAIRS(4)
} else if (meanQuerySize < 8) {
MAKE_PAIRS(8)
} else if (meanQuerySize < 16) {
MAKE_PAIRS(16)
} else {
MAKE_PAIRS(32)
}
#undef MAKE_PAIRS
}
template <int BlockSize, int ThreadsPerQuery>
__global__ void MakeIsSingleClassFlagsImpl(const int* queryOffsets, int queryCount,
const ui32* loadIndices, const float* targets,
bool* isSingleClassQuery) {
int bias = queryCount ? Ldg(queryOffsets) : 0;
auto workingTile = tiled_partition<ThreadsPerQuery>(this_thread_block());
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ ui32 results[BlockSize];
const int queryOffset = (qid < queryCount) ? (queryOffsets[qid] - bias) : 0;
const int querySize = (qid < queryCount) ? (queryOffsets[qid + 1] - bias - queryOffset) : 0;
const ui32 firstIdx = qid < queryCount ? loadIndices[queryOffset] : 0;
float firstTarget = Ldg(targets + firstIdx);
int isSingleClass = 1;
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
const ui32 loadIdx = loadIndices[queryOffset + i];
float docTarget = Ldg(targets + loadIdx);
if (abs(firstTarget - docTarget) > 1e-5f) {
isSingleClass = 0;
}
}
using TOp = TCudaMultiply<int>;
isSingleClass = TileReduce<int, ThreadsPerQuery, TOp>(workingTile, isSingleClass);
if (workingTile.thread_rank() == 0) {
results[localQid] = isSingleClass;
workingTile.sync();
}
isSingleClass = results[localQid];
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
isSingleClassQuery[queryOffset + i] = isSingleClass == 1;
}
}
void MakeIsSingleClassFlags(const float* targets, const ui32* loadIndices,
const ui32* queryOffsets,
ui32 queryCount,
double meanQuerySize,
bool* isSingleClassQuery,
TCudaStream stream) {
const int blockSize = 128;
#define RUN_KERNEL(threadsPerQuery) \
const int numBlocks = (queryCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
MakeIsSingleClassFlagsImpl<blockSize, threadsPerQuery> <<< numBlocks, blockSize, 0, stream >>> ((int*)queryOffsets, queryCount, loadIndices, targets, isSingleClassQuery); \
}
if (meanQuerySize < 2) {
RUN_KERNEL(2)
} else if (meanQuerySize < 4) {
RUN_KERNEL(4)
} else if (meanQuerySize < 8) {
RUN_KERNEL(8)
} else if (meanQuerySize < 16) {
RUN_KERNEL(16)
} else {
RUN_KERNEL(32)
}
#undef RUN_KERNEL
}
//for stochastic gradient
__global__ void FillPairDer2AndRemapPairDocumentsImpl(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pair.x = Ldg(docIds + pair.x);
pair.y = Ldg(docIds + pair.y);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
pairs[i] = pair;
}
}
void FillPairDer2AndRemapPairDocuments(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
FillPairDer2AndRemapPairDocumentsImpl<<< numBlocks, blockSize,0, stream >>>(ders2, groupDers2, docIds, qids, pairCount, pairDer2, pairs);
}
}
}
|
9567d3742b729ec0fcb58a466f9cb2e78c6fffb3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define TILE_WIDTH 16
// an optimized version of matrix_multiplication which eliminates redundant loads
__global__ void matrix_multiply(int *d_M, int *d_N, int *d_P, size_t width)
{
// create shorthand names for threadIdx & blockIdx
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
// allocate 2D tiles in __shared__ memory
__shared__ int s_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int s_N[TILE_WIDTH][TILE_WIDTH];
// calculate the row & column index of the element
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
int result = 0;
// loop over the tiles of the input in phases
for(int p = 0; p < width/TILE_WIDTH; ++p)
{
// collaboratively load tiles into __shared__
s_M[ty][tx] = d_M[row*width + (p*TILE_WIDTH + tx)];
s_N[ty][tx] = d_N[(p*TILE_WIDTH + ty)*width + col];
// wait until all data is loaded before allowing
// any thread in this block to continue
__syncthreads();
// do dot product between row of s_a and column of s_b
for(int k = 0; k < TILE_WIDTH; ++k)
{
result += s_M[ty][k] * s_N[k][tx];
}
// wait until all threads are finished with the data
// before allowing any thread in this block to continue
__syncthreads();
}
// write out this thread's result
d_P[row*width+col] = result;
}
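// Editor's note (illustrative, assumes the same names as above): this tiled kernel assumes width
// is an exact multiple of TILE_WIDTH (the phase loop truncates otherwise) and that the launch grid
// exactly covers the matrix. A hypothetical boundary-guarded variant would loop over
// (width + TILE_WIDTH - 1) / TILE_WIDTH phases and zero-fill out-of-range elements:
//     s_M[ty][tx] = (row < width && p*TILE_WIDTH + tx < width) ? d_M[row*width + p*TILE_WIDTH + tx] : 0;
//     s_N[ty][tx] = (p*TILE_WIDTH + ty < width && col < width) ? d_N[(p*TILE_WIDTH + ty)*width + col] : 0;
// with the final write guarded by (row < width && col < width).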
void MatrixMulOnHost(int* M, int* N, int* P, int Width)
{
for (int i = 0; i < Width; ++i) {
for (int j = 0; j < Width; ++j) {
double sum = 0;
for (int k = 0; k < Width; ++k) {
double a = M[i * Width + k];
double b = N[k * Width + j];
sum += a * b;
}
P[i * Width + j] = sum;
}
}
}
int main(void)
{
// create a large workload so we can easily measure the
// performance difference of both implementations
// note that n measures the width of the matrix, not the number of total elements
//const size_t n = 1<<10;
const size_t n = 1024;
std::cout << "Total element is " << n << "\n";
const dim3 block_size(TILE_WIDTH,TILE_WIDTH);
const dim3 num_blocks(n / block_size.x, n / block_size.y);
// generate random input on the host
std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n);
for(int i = 0; i < n*n; ++i)
{
h_a[i] = static_cast<int>(rand()) / RAND_MAX;
h_b[i] = static_cast<int>(rand()) / RAND_MAX;
}
// allocate storage for the device
int *d_a = 0, *d_b = 0, *d_c = 0;
hipMalloc((void**)&d_a, sizeof(int) * n * n);
hipMalloc((void**)&d_b, sizeof(int) * n * n);
hipMalloc((void**)&d_c, sizeof(int) * n * n);
// copy input to the device
hipMemcpy(d_a, &h_a[0], sizeof(int) * n * n, hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b[0], sizeof(int) * n * n, hipMemcpyHostToDevice);
// launch the tiled matrix multiply kernel
hipLaunchKernelGGL(( matrix_multiply), dim3(num_blocks),dim3(block_size), 0, 0, d_a, d_b, d_c, n);
// copy result back to the host
hipMemcpy(&h_c[0], d_c, sizeof(int) * n * n, hipMemcpyDeviceToHost);
//------------------
int* h_r;
h_r = (int*)malloc(sizeof(int) * n * n);
MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n);
for (int i=0; i<(n*n); i++) {
if (h_r[i] != h_c[i]) {
std::cout << "Failed at i " << i << "h_r=" << h_r[i] << ",h_c=" << h_c[i] << "\n";
exit(1);
}
}
std::cout << "Result is correct.";
// deallocate device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
9567d3742b729ec0fcb58a466f9cb2e78c6fffb3.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <algorithm>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define TILE_WIDTH 16
// an optimized version of matrix_multiplication which eliminates redundant loads
__global__ void matrix_multiply(int *d_M, int *d_N, int *d_P, size_t width)
{
// create shorthand names for threadIdx & blockIdx
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
// allocate 2D tiles in __shared__ memory
__shared__ int s_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int s_N[TILE_WIDTH][TILE_WIDTH];
// calculate the row & column index of the element
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
int result = 0;
// loop over the tiles of the input in phases
for(int p = 0; p < width/TILE_WIDTH; ++p)
{
// collaboratively load tiles into __shared__
s_M[ty][tx] = d_M[row*width + (p*TILE_WIDTH + tx)];
s_N[ty][tx] = d_N[(p*TILE_WIDTH + ty)*width + col];
// wait until all data is loaded before allowing
// any thread in this block to continue
__syncthreads();
// do dot product between row of s_a and column of s_b
for(int k = 0; k < TILE_WIDTH; ++k)
{
result += s_M[ty][k] * s_N[k][tx];
}
// wait until all threads are finished with the data
// before allowing any thread in this block to continue
__syncthreads();
}
// write out this thread's result
d_P[row*width+col] = result;
}
void MatrixMulOnHost(int* M, int* N, int* P, int Width)
{
for (int i = 0; i < Width; ++i) {
for (int j = 0; j < Width; ++j) {
double sum = 0;
for (int k = 0; k < Width; ++k) {
double a = M[i * Width + k];
double b = N[k * Width + j];
sum += a * b;
}
P[i * Width + j] = sum;
}
}
}
int main(void)
{
// create a large workload so we can easily measure the
// performance difference of both implementations
// note that n measures the width of the matrix, not the number of total elements
//const size_t n = 1<<10;
const size_t n = 1024;
std::cout << "Total element is " << n << "\n";
const dim3 block_size(TILE_WIDTH,TILE_WIDTH);
const dim3 num_blocks(n / block_size.x, n / block_size.y);
// generate random input on the host
std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n);
for(int i = 0; i < n*n; ++i)
{
h_a[i] = static_cast<int>(rand()) / RAND_MAX;
h_b[i] = static_cast<int>(rand()) / RAND_MAX;
}
// allocate storage for the device
int *d_a = 0, *d_b = 0, *d_c = 0;
cudaMalloc((void**)&d_a, sizeof(int) * n * n);
cudaMalloc((void**)&d_b, sizeof(int) * n * n);
cudaMalloc((void**)&d_c, sizeof(int) * n * n);
// copy input to the device
cudaMemcpy(d_a, &h_a[0], sizeof(int) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b[0], sizeof(int) * n * n, cudaMemcpyHostToDevice);
// launch the tiled matrix multiply kernel
matrix_multiply<<<num_blocks,block_size>>>(d_a, d_b, d_c, n);
// copy result back to the host
cudaMemcpy(&h_c[0], d_c, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
//------------------
int* h_r;
h_r = (int*)malloc(sizeof(int) * n * n);
MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n);
for (int i=0; i<(n*n); i++) {
if (h_r[i] != h_c[i]) {
std::cout << "Failed at i " << i << "h_r=" << h_r[i] << ",h_c=" << h_c[i] << "\n";
exit(1);
}
}
std::cout << "Result is correct.";
// deallocate device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
7b04a41a97d722e2487e37697cfaff08157a8a26.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_shared_worker.cuh"
__global__ void gpu_shared_updated_filter(unsigned char * image_origininal, unsigned char * image_result, unsigned int width, unsigned int height, int division_coef)
{
int current_width = blockIdx.y * blockDim.y + threadIdx.y;
int current_height = blockIdx.x * blockDim.x + threadIdx.x;
int filter[3][3] =
{
{ 1,-2,1 },{ -2,5,-2 },{ 1,-2,1 }
};
__shared__ unsigned char block[2][32];
block[threadIdx.x][threadIdx.y] = (
(
image_origininal[current_height *(width + 2) + current_width ] * (filter[0][0])
+ image_origininal[(current_height )*(width + 2) + (current_width + 1)] * (filter[0][1])
+ image_origininal[(current_height )*(width + 2) + (current_width + 2)] * (filter[0][2])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width )] * (filter[1][0])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 1)] * (filter[1][1])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 2)] * (filter[1][2])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width )] * (filter[2][0])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 1)] * (filter[2][1])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 2)] * (filter[2][2])
)
/ division_coef
);
image_result[current_height * width + current_width] = block[threadIdx.x][threadIdx.y];
}
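// Editor's note: the unrolled sum above is a 3x3 convolution over a source image padded by one
// pixel on each side (hence the width + 2 row stride). In loop form the per-pixel work is:
//     int acc = 0;
//     for (int dy = 0; dy < 3; ++dy)
//         for (int dx = 0; dx < 3; ++dx)
//             acc += image_origininal[(current_height + dy) * (width + 2) + (current_width + dx)]
//                    * filter[dy][dx];
//     image_result[current_height * width + current_width] = acc / division_coef;
// Note that the __shared__ staging buffer adds no data reuse here: each thread writes and then
// reads back only its own element, so a direct store to image_result would behave identically.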
Result perform_GPU_shared_updated_worker(Task task)
{
hipEvent_t start_time, stop_time;
hipEventCreate(&start_time);
hipEventCreate(&stop_time);
unsigned char* image_original;
unsigned char* image_result;
auto cuda_status =
hipMalloc((void**)(&image_original),
(task.image.matrix.height) * (task.image.matrix.width) * sizeof(unsigned char));
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status = hipMemcpy(image_original,
task.image.matrix.matrix,
(task.image.matrix.height) * (task.image.matrix.width) *
sizeof(unsigned char), hipMemcpyHostToDevice);
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status =
hipMalloc((void**)(&image_result),
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char));
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
dim3 block(2, 32);
dim3 grid;
grid.x = task.work_matrix.height / block.x;
if (task.work_matrix.height % block.x != 0)
grid.x += 1;
grid.y = task.work_matrix.width / block.y;
if (task.work_matrix.width % block.y != 0)
grid.y += 1;
hipEventRecord(start_time);
gpu_shared_updated_filter << <grid, block >> > (image_original, image_result, task.work_matrix.width, task.work_matrix.height, task.division_coef);
hipDeviceSynchronize();
hipEventRecord(stop_time);
hipEventSynchronize(stop_time);
Result result;
hipEventElapsedTime(&result.time, start_time, stop_time);
cuda_status = hipMemcpy(task.work_matrix.matrix,
image_result,
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char), hipMemcpyDeviceToHost);
if (cuda_status != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
exit(EXIT_FAILURE);
}
result.result = task.work_matrix;
hipEventElapsedTime(&result.time, start_time, stop_time);
return result;
}
|
7b04a41a97d722e2487e37697cfaff08157a8a26.cu
|
#include "gpu_shared_worker.cuh"
__global__ void gpu_shared_updated_filter(unsigned char * image_origininal, unsigned char * image_result, unsigned int width, unsigned int height, int division_coef)
{
int current_width = blockIdx.y * blockDim.y + threadIdx.y;
int current_height = blockIdx.x * blockDim.x + threadIdx.x;
int filter[3][3] =
{
{ 1,-2,1 },{ -2,5,-2 },{ 1,-2,1 }
};
__shared__ unsigned char block[2][32];
block[threadIdx.x][threadIdx.y] = (
(
image_origininal[current_height *(width + 2) + current_width ] * (filter[0][0])
+ image_origininal[(current_height )*(width + 2) + (current_width + 1)] * (filter[0][1])
+ image_origininal[(current_height )*(width + 2) + (current_width + 2)] * (filter[0][2])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width )] * (filter[1][0])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 1)] * (filter[1][1])
+ image_origininal[(current_height + 1)*(width + 2) + (current_width + 2)] * (filter[1][2])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width )] * (filter[2][0])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 1)] * (filter[2][1])
+ image_origininal[(current_height + 2)*(width + 2) + (current_width + 2)] * (filter[2][2])
)
/ division_coef
);
image_result[current_height * width + current_width] = block[threadIdx.x][threadIdx.y];
}
Result perform_GPU_shared_updated_worker(Task task)
{
cudaEvent_t start_time, stop_time;
cudaEventCreate(&start_time);
cudaEventCreate(&stop_time);
unsigned char* image_original;
unsigned char* image_result;
auto cuda_status =
cudaMalloc((void**)(&image_original),
(task.image.matrix.height) * (task.image.matrix.width) * sizeof(unsigned char));
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status = cudaMemcpy(image_original,
task.image.matrix.matrix,
(task.image.matrix.height) * (task.image.matrix.width) *
sizeof(unsigned char), cudaMemcpyHostToDevice);
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
cuda_status =
cudaMalloc((void**)(&image_result),
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char));
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
dim3 block(2, 32);
dim3 grid;
grid.x = task.work_matrix.height / block.x;
if (task.work_matrix.height % block.x != 0)
grid.x += 1;
grid.y = task.work_matrix.width / block.y;
if (task.work_matrix.width % block.y != 0)
grid.y += 1;
cudaEventRecord(start_time);
gpu_shared_updated_filter << <grid, block >> > (image_original, image_result, task.work_matrix.width, task.work_matrix.height, task.division_coef);
cudaDeviceSynchronize();
cudaEventRecord(stop_time);
cudaEventSynchronize(stop_time);
Result result;
cudaEventElapsedTime(&result.time, start_time, stop_time);
cuda_status = cudaMemcpy(task.work_matrix.matrix,
image_result,
(task.work_matrix.height) * (task.work_matrix.width) * sizeof(unsigned char), cudaMemcpyDeviceToHost);
if (cuda_status != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
exit(EXIT_FAILURE);
}
result.result = task.work_matrix;
cudaEventElapsedTime(&result.time, start_time, stop_time);
return result;
}
|
4ce21a81e90a851deb600211cd62e2aaa01c19e1.hip
|
// !!! This is a file automatically generated by hipify!!!
// cd /home/hork/cuda-workspace/CudaSHA256/Debug/files
// time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list
// time ../CudaSHA256 -f ../file-list
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include "sha256.cuh"
#include <dirent.h>
#include <ctype.h>
#include <sys/time.h>
#define N 20000
#define BLOCKSIZE 512
#define M 4000000000/N
void string2ByteArray(char* input, BYTE* output)
{
uint32_t loop;
uint32_t i;
loop = 0;
i = 0;
while(input[loop] != '\0')
{
output[i++] = input[loop++];
}
}
uint32_t LitToBigEndian(uint32_t x)
{
return (((x>>24) & 0x000000ff) | ((x>>8) & 0x0000ff00) | ((x<<8) & 0x00ff0000) | ((x<<24) & 0xff000000));
}
__global__ void sha256_cuda(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t stride = blockDim.x * gridDim.x;
for (uint32_t i = index; i < n; i += stride){
SHA256_CTX ctx;
jobs[i]->data2[3] = j*n+i;
sha256_transform(&ctx, jobs[i]->data1, jobs[i]->digest);
}
}
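// Editor's note: each launch hashes N = 20000 message blocks with the grid-stride loop above, and
// main issues M = 4000000000/N = 200000 launches, i.e. 4*10^9 hashes in total (slightly fewer than
// the 2^32 = 4294967296 quoted in the timing message). The OUT *outs argument is declared in main
// but never initialized, and this kernel version never writes to it.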
void pre_sha256() {
// copy symbols
checkCudaErrors(hipMemcpyToSymbol(dev_k, host_k, sizeof(host_k), 0, hipMemcpyHostToDevice));
}
void runJobs(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs){
uint32_t blockSize = BLOCKSIZE;
uint32_t numBlocks = (n + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( sha256_cuda) , dim3(numBlocks), dim3(blockSize) , 0, 0, jobs, n, j,outs);
//sha256_cuda <<< 1, 1 >>> (jobs, n, j, outs);
//sha256_cuda <<< 1, 16 >>> (jobs, n);
}
JOB * JOB_init(const WORD data1[]) {
JOB * j;
checkCudaErrors(hipMallocManaged(&j, sizeof(JOB)));
for (uint32_t i = 0; i < 16; i++)
{
j->data1[i] = data1[i];
}
return j;
}
int main(void)
{
JOB ** jobs;
OUT * outs;
uint32_t i,j;
clock_t start, end;
double cpu_time_used;
int GPU_N;
start = clock();
checkCudaErrors(hipGetDeviceCount(&GPU_N));
checkCudaErrors(hipSetDevice(GPU_N-1));
//sha256_transform_0(&ctx1, Word1, buf1);
checkCudaErrors(hipMallocManaged(&jobs, N * sizeof(JOB *)));
for (i=0; i < N; ++i){
WORD Word1[16] = {i, i+1, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000040};
jobs[i] = JOB_init(Word1);
}
for(j = 0; j <M; ++j){
pre_sha256();
runJobs(jobs, N, j, outs);
}
hipDeviceSynchronize();
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("*Execution Time of 2^32 hashes on GPU : %f seconds\n", cpu_time_used);
hipDeviceReset();
return 0;
}
|
4ce21a81e90a851deb600211cd62e2aaa01c19e1.cu
|
// cd /home/hork/cuda-workspace/CudaSHA256/Debug/files
// time ~/Dropbox/FIIT/APS/Projekt/CpuSHA256/a.out -f ../file-list
// time ../CudaSHA256 -f ../file-list
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cuda.h>
#include "sha256.cuh"
#include <dirent.h>
#include <ctype.h>
#include <sys/time.h>
#define N 20000
#define BLOCKSIZE 512
#define M 4000000000/N
void string2ByteArray(char* input, BYTE* output)
{
uint32_t loop;
uint32_t i;
loop = 0;
i = 0;
while(input[loop] != '\0')
{
output[i++] = input[loop++];
}
}
uint32_t LitToBigEndian(uint32_t x)
{
return (((x>>24) & 0x000000ff) | ((x>>8) & 0x0000ff00) | ((x<<8) & 0x00ff0000) | ((x<<24) & 0xff000000));
}
__global__ void sha256_cuda(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs) {
uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t stride = blockDim.x * gridDim.x;
for (uint32_t i = index; i < n; i += stride){
SHA256_CTX ctx;
jobs[i]->data2[3] = j*n+i;
sha256_transform(&ctx, jobs[i]->data1, jobs[i]->digest);
}
}
void pre_sha256() {
// copy symbols
checkCudaErrors(cudaMemcpyToSymbol(dev_k, host_k, sizeof(host_k), 0, cudaMemcpyHostToDevice));
}
void runJobs(JOB ** jobs, uint32_t n, uint32_t j, OUT * outs){
uint32_t blockSize = BLOCKSIZE;
uint32_t numBlocks = (n + blockSize - 1) / blockSize;
sha256_cuda <<< numBlocks, blockSize >>> (jobs, n, j,outs);
//sha256_cuda <<< 1, 1 >>> (jobs, n, j, outs);
//sha256_cuda <<< 1, 16 >>> (jobs, n);
}
JOB * JOB_init(const WORD data1[]) {
JOB * j;
checkCudaErrors(cudaMallocManaged(&j, sizeof(JOB)));
for (uint32_t i = 0; i < 16; i++)
{
j->data1[i] = data1[i];
}
return j;
}
int main(void)
{
JOB ** jobs;
OUT * outs;
uint32_t i,j;
clock_t start, end;
double cpu_time_used;
int GPU_N;
start = clock();
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
checkCudaErrors(cudaSetDevice(GPU_N-1));
//sha256_transform_0(&ctx1, Word1, buf1);
checkCudaErrors(cudaMallocManaged(&jobs, N * sizeof(JOB *)));
for (i=0; i < N; ++i){
WORD Word1[16] = {i, i+1, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000040};
jobs[i] = JOB_init(Word1);
}
for(j = 0; j <M; ++j){
pre_sha256();
runJobs(jobs, N, j, outs);
}
cudaDeviceSynchronize();
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("*Execution Time of 2^32 hashes on GPU : %f seconds\n", cpu_time_used);
cudaDeviceReset();
return 0;
}
|
8f1fa5492df55bffbb8e06bd83899434ebb5de55.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mycommon.h"
#define BSIZE 256
__constant__ int nfeat;
__constant__ int ntrain;
__constant__ int ntest;
__constant__ int nclass;
__constant__ int k;
__constant__ int nnegibor;
__constant__ double mu;
__constant__ double nu;
__constant__ int idx_o;
__constant__ int *target;
__constant__ double *km_train;
__constant__ double *km_test;
__constant__ double *O[2];
__constant__ double *t_target;
__constant__ double *t_triplet;
__constant__ double *t_update;
__constant__ double *t_gradient;
__constant__ short *label_train;
__constant__ short *label_test;
__constant__ struct Inst *grouped_inst;
__constant__ unsigned typecount[4];
__constant__ int *position_index;
__constant__ double *dist_target;
__constant__ double *dist1;
__constant__ double *dist2;
__constant__ double *hinge_val;
__constant__ double *dist_knn;
__constant__ int *ino_knn;
__constant__ int *neighbor_knn;
__device__ double f_val;
__device__ double sub_fval[84];
__device__ double acc_knn;
__device__ int hits[4];
__device__ void kernelMatrix(double *km, double *d1, int n1, double *d2, int n2){
int ub = n1 * n2;
int stride = blockDim.x * gridDim.x;
double c_val;
int i, j;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
i = m / n2;
j = m % n2;
c_val = .0;
for (int n = 0; n < nfeat; ++ n)
c_val += pow(d1[n * n1 + i] - d2[n * n2 + j], 2);
km[m] = exp(-c_val / nfeat);
}
}
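// Editor's note: kernelMatrix fills km with Gaussian (RBF) kernel values. With the data stored
// feature-major (element n of instance i at d[n * count + i]), each (i, j) entry is
//     km[i][j] = exp(-(1/nfeat) * sum_n (d1[n][i] - d2[n][j])^2),
// computed by one thread per entry in a grid-stride loop.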
__global__ void calcKM(double *train, double *test){
kernelMatrix(km_train, train, ntrain, train, ntrain);
kernelMatrix(km_test, test, ntest, train, ntrain);
}
__device__ double getElement(double *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElement(double *m, int i, int j, int stride, double val){
m[i * stride + j] = val;
}
__device__ int getElementInt(int *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElementInt(int *m, int i, int j, int stride, int val){
m[i * stride + j] = val;
}
__device__ int getTargetDist(int i, int kk){
return dist_target[i * k + kk];
}
__device__ double calcDist(int i, double *km1, int j, double *km2){
int tid = threadIdx.x;
__shared__ double diff_k[256];
__shared__ double sum[256];
__shared__ double norm[64];
if (tid < 64)
norm[tid] = .0;
int pos;
for (int m = 0; m < (ntrain - 1)/blockDim.x + 1; ++ m){
__syncthreads();
pos = m * blockDim.x + tid;
if (pos < ntrain)
diff_k[tid] = getElement(km1, i, pos, ntrain) - getElement(km2, j, pos, ntrain);
for (int d = 0; d < nfeat; ++ d){
__syncthreads();
if (pos < ntrain)
sum[tid] = getElement(O[idx_o], d, pos, ntrain) * diff_k[tid];
else
sum[tid] = .0;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride)
sum[tid] += sum[tid + stride];
stride /= 2;
}
__syncthreads();
if (tid == 0)
norm[d] += sum[0];
}
}
if (tid < nfeat)
norm[tid] = norm[tid]*norm[tid];
__syncthreads();
double s = .0;
for (int d = 0; d < nfeat; ++ d)
s += norm[d];
return s;
}
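// Editor's note: calcDist returns the squared distance in the learned space, ||O * (k_i - k_j)||^2,
// where k_i is row i of km1 and k_j is row j of km2. One 256-thread block accumulates, for each of
// the nfeat output dimensions d, the dot product sum_p O[d][p] * (k_i[p] - k_j[p]) via the shared
// memory tree reduction, then squares and sums the per-dimension results (nfeat <= 64 is assumed
// by the norm[64] buffer).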
__device__ void calcTargetDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
if (tid == 0)
sub_fval[bid] = .0;
for(int m = bid; m < ntrain * k; m += gridDim.x){
i = m / k;
j = target[m];
double val = calcDist(i, km_train, j, km_train);
if (tid == 0){
dist_target[m] = val;
sub_fval[bid] += val;
}
}
}
__device__ void updateDist(double *dist, struct Inst * inst1, int height, struct Inst * inst2, int width){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
for (int m = bid; m < height * width; m += gridDim.x){
i = inst1[m / width].ino;
j = inst2[m % width].ino;
double val = calcDist(i, km_train, j, km_train);
if (tid == 0)
dist[m] = val;
}
}
__global__ void update2(){
calcTargetDist();
updateDist(dist1, grouped_inst, typecount[0], grouped_inst + typecount[0], typecount[1]);
}
__device__ int getTarget(int i, int kk){
return target[i * k + kk];
}
__global__ void zeroHinge(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < typecount[0] * typecount[1] * 2 * k; m += size)
hinge_val[m] = .0;
}
__device__ double hinge(double s){
if (s <= -1.0)
return .0;
else if (s >= 0)
return 1.0;
else
return 1 + s;
}
__device__ void updateTri(int idx1, int idx2, int idx3, double h){
__syncthreads();
for (int p = threadIdx.x; p < ntrain; p += blockDim.x)
t_triplet[p * ntrain + idx1] += h * (getElement(km_train, idx2, p, ntrain) - getElement(km_train, idx3, p, ntrain));
}
__global__ void zeroT_triplet(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < ntrain * ntrain; m += size)
t_triplet[m] = .0;
}
__global__ void update3_2(){
int bid = blockIdx.x;
int i, j, l;
double vdist, h;
if (bid == 0 && threadIdx.x == 0)
f_val = .0;
for (int m = 0; m < typecount[TN] * typecount[TP]; ++ m){
for (int kk = 0; kk < k; ++ kk){
i = grouped_inst[m / typecount[TP]].ino;
l = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
if (label_train[i] == TP)
h *= nu;
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
l = grouped_inst[m / typecount[TP]].ino;
i = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
if (label_train[i] == TP)
h *= nu;
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
}
}
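// Editor's note: update3_2 accumulates the LMNN-style triplet terms. For every anchor i, each of
// its k targets j, and every instance l of the opposite class (dist1 holds the TN-vs-TP distances),
// the violation is 1 + d(i, j) - d(i, l); positive violations are summed into f_val, and hinge() of
// the violation scales the gradient contributions pushed into t_triplet for i, j and l (re-weighted
// by nu when the anchor is a TP instance). Both orderings of the TN/TP pair are processed, and
// update3_3 below is the incremental variant that only applies hinge deltas.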
__global__ void update3_3(){
int bid = blockIdx.x;
int i, j, l;
double vdist, h, *h_addr;
if (bid == 0 && threadIdx.x == 0)
f_val = .0;
for (int m = 0; m < typecount[TN] * typecount[TP]; ++ m){
for (int kk = 0; kk < k; ++ kk){
i = grouped_inst[m / typecount[TP]].ino;
l = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h_addr = hinge_val + m * 2 * k + kk;
h = hinge(vdist);
if (h != (*h_addr)){
if (i % gridDim.x == bid)
updateTri(i, l, j, h - (*h_addr));
if (j % gridDim.x == bid)
updateTri(j, j, i, h - (*h_addr));
if (l % gridDim.x == bid)
updateTri(l, i, l, h - (*h_addr));
}
(*h_addr) = h;
l = grouped_inst[m / typecount[TP]].ino;
i = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h_addr = hinge_val + m * 2 * k + k + kk;
h = hinge(vdist);
if (h != (*h_addr)){
if (i % gridDim.x == bid)
updateTri(i, l, j, h - (*h_addr));
if (j % gridDim.x == bid)
updateTri(j, j, i, h - (*h_addr));
if (l % gridDim.x == bid)
updateTri(l, i, l, h - (*h_addr));
}
(*h_addr) = h;
}
}
}
__global__ void calcFval(){
if (blockIdx.x == 0 && threadIdx.x == 0)
for (int i = 0; i < gridDim.x; ++ i)
f_val += sub_fval[i];
}
__global__ void updateUpdateTerm(double alpha){
int size = gridDim.x * blockDim.x;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ntrain * ntrain; m += size){
if (m/ntrain == m%ntrain)
t_update[m] = 1 - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
//t_update[m] = 1 - 2 * alpha * ((1-mu) * t_target[m] + mu * t_triplet[m]);
else
t_update[m] = - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
//t_update[m] = - 2 * alpha * ((1-mu) * t_target[m] + mu * t_triplet[m]);
}
}
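// Editor's note: this builds the gradient step as an update matrix T = I - 2 * alpha * (T_target +
// mu * T_triplet) (the commented-out lines are the (1 - mu) weighted variant); updateO/updateO1
// below then advance the transformation by the matrix product O_new = O_old * T.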
__global__ void zeroO(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < nfeat * ntrain; m += size)
O[1 - idx_o][m] = .0;
}
__global__ void updateO(){
int tid = threadIdx.x;
int bid_row = blockIdx.x;
int bid_col = blockIdx.y;
__shared__ double o_element[BSIZE];
if (bid_col * BSIZE + tid < ntrain){
for (int start = 0; start < ntrain; start += BSIZE){
int len = min(BSIZE, ntrain - start);
if(tid < len)
o_element[tid] = getElement(O[idx_o], bid_row, start + tid, ntrain);
__syncthreads();
for (int i = 0; i < len; ++ i){
double val = o_element[i] * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
//__syncthreads();
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] += val;
}
}
}
}
__global__ void updateO1(){
int tid = threadIdx.x;
int bid_row = blockIdx.x;
int bid_col = blockIdx.y;
int workingtid = min(BSIZE, ntrain - bid_col * BSIZE);
if (tid < workingtid)
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] = .0;
//__shared__ double o_element[BSIZE];
for (int start = 0; start < ntrain; start += BSIZE){
int len = min(BSIZE, ntrain - start);
//if(tid < len)
// o_element[tid] = getElement(O[idx_o], bid_row, start + tid, ntrain);
//__syncthreads();
for (int i = 0; i < len; ++ i){
if (tid < workingtid){
double val = getElement(O[idx_o], bid_row, start + i, ntrain) * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
//double val = o_element[i] * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
//__syncthreads();
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] += val;
}
}
}
}
__global__ void knnUpdateDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x;
for(int m = bid; m < ntest * ntrain; m += size){
int i = m / ntrain;
int j = m % ntrain;
double d = calcDist(i, km_test, j, km_train);
if (tid == 0){
ino_knn[m] = j;
dist_knn[m] = d;
}
}
}
// launched with # blocks = ntest
__global__ void knnFindNeighbor(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int len = ntrain / BSIZE;
int start = tid * len;
if (tid < ntrain % BSIZE){
start += tid;
++ len;
}
else
start += ntrain % BSIZE;
__syncthreads();
//int b = min(len, k);
int b = min(len, nnegibor);
for (int i = 0; i < b; ++ i)
for (int j = start; j < start + len - i - 1; ++ j)
if(getElement(dist_knn, bid, j, ntrain) < getElement(dist_knn, bid, j + 1, ntrain)){
double tmp_dist = getElement(dist_knn, bid, j, ntrain);
setElement(dist_knn, bid, j, ntrain, getElement(dist_knn, bid, j + 1, ntrain));
setElement(dist_knn, bid, j + 1, ntrain, tmp_dist);
int tmp_ino = getElementInt(ino_knn, bid, j, ntrain);
setElementInt(ino_knn, bid, j, ntrain, getElementInt(ino_knn, bid, j + 1, ntrain));
setElementInt(ino_knn, bid, j + 1, ntrain, tmp_ino);
}
__syncthreads();
__shared__ double dist[BSIZE];
__shared__ int ino[BSIZE];
__shared__ int shortest[BSIZE];
int p = start + len -1;
//for (int i = 0; i < k; ++ i){
for (int i = 0; i < nnegibor; ++ i){
if (b > 0){
dist[tid] = getElement(dist_knn, bid, p, ntrain);
ino[tid] = getElementInt(ino_knn, bid, p, ntrain);
}
else
dist[tid] = DBL_MAX;
shortest[tid] = tid;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride){
if (dist[tid] > dist[tid + stride]){
dist[tid] = dist[tid + stride];
ino[tid] = ino[tid + stride];
shortest[tid] = shortest[tid + stride];
}
}
stride /= 2;
}
__syncthreads();
if(tid == 0)
//setElementInt(neighbor_knn, bid, i, k, ino[0]);
setElementInt(neighbor_knn, bid, i, nnegibor, ino[0]);
if(tid == shortest[0]){
-- b;
-- p;
}
}
}
__global__ void knnMatching(){
//int ub = ntest * k;
int ub = ntest * nnegibor;
int stride = blockDim.x * gridDim.x;
int idx_test, idx_train;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
//idx_test = m / k;
idx_test = m / nnegibor;
idx_train = neighbor_knn[m];
if (label_test[idx_test] == label_train[idx_train])
neighbor_knn[m] = 1;
else
neighbor_knn[m] = 0;
}
}
// launch with a single block
__global__ void knnAcc(int neiborhood_size){
int tid = threadIdx.x;
int stride = blockDim.x;
if (tid < 4)
hits[tid] = 0;
__shared__ int matched[BSIZE];
matched[tid] = 0;
for (int m = tid; m < ntest; m += stride){
int nsametype = 0;
for (int i = 0; i < neiborhood_size; ++ i)
//nsametype += neighbor_knn[m * k + i];
nsametype += neighbor_knn[m * nnegibor + i];
if (nsametype > neiborhood_size/2){
matched[tid] += 1;
if (label_test[m] == TP)
atomicAdd(&hits[TP], 1);
}
else{
if (label_test[m] == TN)
atomicSub(&hits[TN], 1);
}
}
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
matched[tid] += matched[tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid ==0)
acc_knn = 1.0 * matched[0] / ntest;
}
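// kNN evaluation pipeline used by kernelTest() whenever the objective improves:
// knnUpdateDist recomputes every test-to-train distance under the current transform
// (one block per pair, thread 0 stores the result), knnFindNeighbor picks the nnegibor
// nearest training instances per test instance, knnMatching turns each neighbour index
// into a 0/1 label match, and knnAcc(m) reports the majority-vote accuracy (acc_knn)
// plus the per-class counters in hits[] for a neighbourhood of size m.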
void deviceInitKernelMatrix(int *trainninst, int *testninst, int *nf, double *traindata, double *testdata){
hipMemcpyToSymbol(ntrain, trainninst, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(ntest, testninst, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nfeat, nf, sizeof(int), 0, hipMemcpyHostToDevice);
double *d_train_data, *d_test_data;
hipMalloc((void **)&d_train_data, sizeof(double) * (*trainninst) * (*nf));
hipMalloc((void **)&d_test_data, sizeof(double) * (*testninst) * (*nf));
hipMemcpy(d_train_data, traindata, sizeof(double) * (*trainninst) * (*nf), hipMemcpyHostToDevice);
hipMemcpy(d_test_data, testdata, sizeof(double) * (*testninst) * (*nf), hipMemcpyHostToDevice);
double *d_kernel_matrix_train, *d_kernel_matrix_test;
hipMalloc((void **)&d_kernel_matrix_train, sizeof(double) * (*trainninst) * (*trainninst));
hipMemcpyToSymbol(km_train, &d_kernel_matrix_train, sizeof(double*), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&d_kernel_matrix_test, sizeof(double) * (*testninst) * (*trainninst));
hipMemcpyToSymbol(km_test, &d_kernel_matrix_test, sizeof(double*), 0, hipMemcpyHostToDevice);
// Run the event recording
hipEvent_t start_event, stop_event;
hipEventCreate(&start_event) ;
hipEventCreate(&stop_event) ;
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( calcKM), dim3(84), dim3(256), 0, 0, d_train_data, d_test_data);
hipDeviceSynchronize();
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipFree(d_train_data);
hipFree(d_test_data);
}
void deviceInitTarget(int *h_target, int trainninst, int *kk, int *nc){
int *d_target;
hipMalloc((void **)&d_target, sizeof(int) * trainninst * (*kk));
hipMemcpy(d_target, h_target, sizeof(int) * trainninst * (*kk), hipMemcpyHostToDevice);
hipMemcpyToSymbol(target, &d_target, sizeof(int*), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(k, kk, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nclass, nc, sizeof(int), 0, hipMemcpyHostToDevice);
}
void deviceInitLabelTrain(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
hipMalloc((void **)&d_label, sizeof(short) * ninst);
hipMemcpy(d_label, label, sizeof(short) * ninst, hipMemcpyHostToDevice);
hipMemcpyToSymbol(label_train, &d_label, sizeof(short*), 0, hipMemcpyHostToDevice);
delete[] label;
}
void deviceInitLabelTest(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
hipMalloc((void **)&d_label, sizeof(short) * ninst);
hipMemcpy(d_label, label, sizeof(short) * ninst, hipMemcpyHostToDevice);
hipMemcpyToSymbol(label_test, &d_label, sizeof(short*), 0, hipMemcpyHostToDevice);
delete[] label;
}
void deviceInitInstList(struct Inst *inst, unsigned *count, unsigned ninst, int nc, int kk){
hipMemcpyToSymbol(typecount, count, sizeof(unsigned) * 4, 0, hipMemcpyHostToDevice);
struct Inst *gi[4];
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
gi[i] = (struct Inst *)malloc(sizeof(struct Inst) * count[i]);
}
//int *index = new int[ninst];
int p[4] = {0, 0, 0, 0};
for(int i = 0; i < ninst; ++ i){
int type = inst[i].label;
gi[type][p[type]].ino = inst[i].ino;
gi[type][p[type]].label = inst[i].label;
//index[i] = p[type];
//for(int j = 0; j < inst[i].label; ++ j)
//index[i] += count[j];
++ p[type];
}
struct Inst *d_inst;
hipMalloc((void **)&d_inst, sizeof(struct Inst) * ninst);
unsigned start = 0;
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
hipMemcpy(d_inst + start, gi[i], sizeof(struct Inst) * count[i], hipMemcpyHostToDevice);
start += count[i];
}
hipMemcpyToSymbol(grouped_inst, &d_inst, sizeof(struct Inst *), 0, hipMemcpyHostToDevice);
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
free(gi[i]);
}
double *distanceTarget, *distanceMatrix1, *distanceMatrix2, *hinge_array;
hipMalloc((void **)&distanceTarget, sizeof(double) * ninst * kk);
hipMemcpyToSymbol(dist_target, &distanceTarget, sizeof(double *), 0, hipMemcpyHostToDevice);
if (nc == 2){
hipMalloc((void **)&distanceMatrix1, sizeof(double) * count[0] * count[1]);
hipMemcpyToSymbol(dist1, &distanceMatrix1, sizeof(double *), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&hinge_array, sizeof(double) * count[0] * count[1] * 2 * kk);
hipMemcpyToSymbol(hinge_val, &hinge_array, sizeof(double *), 0, hipMemcpyHostToDevice);
}
else{
hipMalloc((void **)&distanceMatrix1, sizeof(double) * count[0] * count[3]);
hipMalloc((void **)&distanceMatrix2, sizeof(double) * count[1] * count[2]);
hipMemcpyToSymbol(dist1, &distanceMatrix1, sizeof(double *), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(dist2, &distanceMatrix2, sizeof(double *), 0, hipMemcpyHostToDevice);
}
}
void deviceInitMu(double m, double n){
double local_m = m;
hipMemcpyToSymbol(mu, &local_m, sizeof(double), 0, hipMemcpyHostToDevice);
double local_n = n;
hipMemcpyToSymbol(nu, &local_n, sizeof(double), 0, hipMemcpyHostToDevice);
}
void deviceInitO(double *o, int size){
double *d_t;
//cout << "double O: " << o[1] << endl;
hipMalloc((void **)&d_t, sizeof(double) * size);
hipMemcpy(d_t, o, sizeof(double) * size, hipMemcpyHostToDevice);
hipMemcpyToSymbol(O, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
hipMalloc((void **)&d_t, sizeof(double) * size);
hipMemcpyToSymbol(O, &d_t, sizeof(double*), sizeof(double*), hipMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
}
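// deviceInitO sets up both halves of the double-buffered transform O[2]: the first copy
// (offset 0) points at a buffer initialised with the host matrix o, the second copy
// (offset sizeof(double*)) points at uninitialised scratch space of the same size.
// idx_o selects the active half; updateO/updateO1 always write into O[1 - idx_o].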
void deviceInitTargetTerm(double *t, int size){
double *d_t;
hipMalloc((void **)&d_t, sizeof(double) * size);
hipMemcpy(d_t, t, sizeof(double) * size, hipMemcpyHostToDevice);
hipMemcpyToSymbol(t_target, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
}
void deviceInitUpdateTerm(int size1, int size2){
double *d_t;
hipMalloc((void **)&d_t, sizeof(double) * size1);
hipMemcpyToSymbol(t_update, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&d_t, sizeof(double) * size2);
hipMemcpyToSymbol(t_gradient, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
}
void deviceInitTri(int size){
double *t_o;
hipMalloc((void **)&t_o, sizeof(double) * size);
hipMemcpyToSymbol(t_triplet, &t_o, sizeof(double*), 0, hipMemcpyHostToDevice);
}
void deviceInitKnn(int n_train, int n_test, int kk){
double *d_knn;
hipMalloc((void **)&d_knn, sizeof(double) * n_test * n_train);
hipMemcpyToSymbol(dist_knn, &d_knn, sizeof(double*), 0, hipMemcpyHostToDevice);
int* i_knn;
hipMalloc((void **)&i_knn, sizeof(int) * n_test * n_train);
hipMemcpyToSymbol(ino_knn, &i_knn, sizeof(int*), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&i_knn, sizeof(int) * n_test * kk);
hipMemcpyToSymbol(neighbor_knn, &i_knn, sizeof(int*), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nnegibor, &kk, sizeof(int), 0, hipMemcpyHostToDevice);
}
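// A minimal sketch of the host-side call order these init helpers appear to expect; the
// actual driver lives outside this file, so every name below (train_data, test_data,
// h_target, train_inst, test_inst, typecount, o_init, t_target_host, result, ...) is a
// placeholder for illustration, not a confirmed API.
#if 0
deviceInitKernelMatrix(&n_train, &n_test, &n_feat, train_data, test_data); // builds km_train / km_test
deviceInitTarget(h_target, n_train, &n_targets, &n_classes); // k target neighbours per training instance
deviceInitLabelTrain(train_inst, n_train);
deviceInitLabelTest(test_inst, n_test);
deviceInitInstList(train_inst, typecount, n_train, n_classes, n_targets); // typecount = per-class instance counts
deviceInitMu(mu_weight, nu_weight);
deviceInitO(o_init, n_feat * n_train); // double-buffered transform, see deviceInitO above
deviceInitTargetTerm(t_target_host, n_train * n_train);
deviceInitUpdateTerm(n_train * n_train, n_train * n_train);
deviceInitTri(n_train * n_train);
kernelTest(n_feat, n_train, n_test, n_targets, result, mu_weight, alpha0, nu_weight);
#endif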
void kernelTest(int d, int n, int n_test, int kk, double *result, double mu, double alpha, double nu){
double dd[20];
int h_hits[4];
deviceInitKnn(n, n_test, 40);
//double f = DBL_MAX;
double f_old = DBL_MAX;
double min_iter = 0;
double global_max_acc = .0;
unsigned global_max_iter = 0;
//bool reduced = true;
int idx = 1;
//zeroHinge<<<84, 256>>>();
//zeroT_triplet<<<84, 256>>>();
unsigned iter = 0;
while(true){
// Run the event recording
hipEvent_t start_event, stop_event;
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event, 0);
cout << endl << "Iter = " << iter << ", K = "<< kk << ", mu =" << mu << ", nu =" << nu << endl;
idx = 1 - idx;
hipMemcpyToSymbol(idx_o, &idx, sizeof(int), 0, hipMemcpyHostToDevice);
// update distances to targets(i,j) and between opposing points(i,l)
hipLaunchKernelGGL(( update2), dim3(84), dim3(256), 0, 0, );
// update t_triplet by calculating vdist of every (i, j, l)
hipLaunchKernelGGL(( zeroT_triplet), dim3(84), dim3(256), 0, 0, );
hipLaunchKernelGGL(( update3_2), dim3(84), dim3(256), 0, 0, );
//update3_3<<<84, 256>>>();
// update objective function value
hipLaunchKernelGGL(( calcFval), dim3(84), dim3(256), 0, 0, );
hipDeviceSynchronize();
hipMemcpyFromSymbol(&dd[9], f_val, sizeof(double), 0, hipMemcpyDeviceToHost);
/*
if (dd[9] < f)
alpha *= 1.1;
else
alpha /= 2;
f = dd[9];
*/
cout << "f_val= " << dd[9];
if (dd[9] < f_old){
cout << ", reduced by " << f_old - dd[9] << endl;
f_old = dd[9];
min_iter = iter;
//reduced = true;
alpha *= 1.1;
hipLaunchKernelGGL(( knnUpdateDist), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( knnFindNeighbor), dim3(n_test), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( knnMatching), dim3(84), dim3(BSIZE), 0, 0, );
for (int i = 0; i < 20; ++ i){
hipLaunchKernelGGL(( knnAcc), dim3(1), dim3(BSIZE), 0, 0, 2 * i + 1);
hipDeviceSynchronize();
hipMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&dd[i], acc_knn, sizeof(double), 0, hipMemcpyDeviceToHost);
cout << dd[i] << "(" << h_hits[0] << "," << h_hits[1] << "), ";
}
double max_acc = .0;
int max_acc_k = -1;
for (int i = 0; i < 20; ++ i){
if (dd[i] > max_acc){
max_acc = dd[i];
max_acc_k = 2 * i + 1;
}
}
if (max_acc >= global_max_acc&&iter>10){
global_max_acc = max_acc;
global_max_iter = iter;
}
cout << endl << "max acc = " << max_acc << " at k = " << max_acc_k
<< ". global max = " << global_max_acc << " at iter = " << global_max_iter;
}
else{
cout << ", increased by " << dd[9] - f_old;
//reduced = false;
alpha /= 2;
//int idx = iter % 2;
//if (reduced)
idx = 1 - idx;
hipMemcpyToSymbol(idx_o, &idx, sizeof(int), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( update2), dim3(84), dim3(256), 0, 0, );
// update t_triplet by calculating vdist of every (i, j, l)
hipLaunchKernelGGL(( zeroT_triplet), dim3(84), dim3(256), 0, 0, );
hipLaunchKernelGGL(( update3_2), dim3(84), dim3(256), 0, 0, );
//update3_3<<<84, 256>>>();
}
cout << endl << "min_f = " << f_old << " at iter " << min_iter << ", alpha = " << alpha << endl;
// t_update = I - 2 * alpha * (t_target + t_triplet)
hipLaunchKernelGGL(( updateUpdateTerm), dim3(84), dim3(256), 0, 0, alpha);
// update omega = omega * t_update
hipLaunchKernelGGL(( zeroO), dim3(84), dim3(256), 0, 0, );
dim3 dimGrid(d, (n - 1) / BSIZE + 1);
dim3 dimBlock(BSIZE);
hipLaunchKernelGGL(( updateO1), dim3(dimGrid), dim3(dimBlock), 0, 0, );
hipDeviceSynchronize();
float time_kernel;
hipEventRecord(stop_event, 0);
hipEventElapsedTime(&time_kernel, start_event, stop_event);
cout << "time " << time_kernel/1000 << endl;
++ iter;
//if (iter > 100)
if (alpha < 1e-10)
break;
}
}
/*
hipEvent_t start_event1, stop_event1;
hipEventCreate(&start_event1);
hipEventCreate(&stop_event1);
hipEventRecord(start_event1, 0);
hipDeviceSynchronize();
float time_kernel1;
hipEventRecord(stop_event1, 0);
hipEventElapsedTime(&time_kernel1, start_event1, stop_event1);
cout << "time1 " << time_kernel1/1000 << endl;
*/
|
8f1fa5492df55bffbb8e06bd83899434ebb5de55.cu
|
#include "mycommon.h"
#define BSIZE 256
__constant__ int nfeat;
__constant__ int ntrain;
__constant__ int ntest;
__constant__ int nclass;
__constant__ int k;
__constant__ int nnegibor;
__constant__ double mu;
__constant__ double nu;
__constant__ int idx_o;
__constant__ int *target;
__constant__ double *km_train;
__constant__ double *km_test;
__constant__ double *O[2];
__constant__ double *t_target;
__constant__ double *t_triplet;
__constant__ double *t_update;
__constant__ double *t_gradient;
__constant__ short *label_train;
__constant__ short *label_test;
__constant__ struct Inst *grouped_inst;
__constant__ unsigned typecount[4];
__constant__ int *position_index;
__constant__ double *dist_target;
__constant__ double *dist1;
__constant__ double *dist2;
__constant__ double *hinge_val;
__constant__ double *dist_knn;
__constant__ int *ino_knn;
__constant__ int *neighbor_knn;
__device__ double f_val;
__device__ double sub_fval[84];
__device__ double acc_knn;
__device__ int hits[4];
__device__ void kernelMatrix(double *km, double *d1, int n1, double *d2, int n2){
int ub = n1 * n2;
int stride = blockDim.x * gridDim.x;
double c_val;
int i, j;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
i = m / n2;
j = m % n2;
c_val = .0;
for (int n = 0; n < nfeat; ++ n)
c_val += pow(d1[n * n1 + i] - d2[n * n2 + j], 2);
km[m] = exp(-c_val / nfeat);
}
}
__global__ void calcKM(double *train, double *test){
kernelMatrix(km_train, train, ntrain, train, ntrain);
kernelMatrix(km_test, test, ntest, train, ntrain);
}
__device__ double getElement(double *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElement(double *m, int i, int j, int stride, double val){
m[i * stride + j] = val;
}
__device__ int getElementInt(int *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElementInt(int *m, int i, int j, int stride, int val){
m[i * stride + j] = val;
}
__device__ int getTargetDist(int i, int kk){
return dist_target[i * k + kk];
}
__device__ double calcDist(int i, double *km1, int j, double *km2){
int tid = threadIdx.x;
__shared__ double diff_k[256];
__shared__ double sum[256];
__shared__ double norm[64];
if (tid < 64)
norm[tid] = .0;
int pos;
for (int m = 0; m < (ntrain - 1)/blockDim.x + 1; ++ m){
__syncthreads();
pos = m * blockDim.x + tid;
if (pos < ntrain)
diff_k[tid] = getElement(km1, i, pos, ntrain) - getElement(km2, j, pos, ntrain);
for (int d = 0; d < nfeat; ++ d){
__syncthreads();
if (pos < ntrain)
sum[tid] = getElement(O[idx_o], d, pos, ntrain) * diff_k[tid];
else
sum[tid] = .0;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride)
sum[tid] += sum[tid + stride];
stride /= 2;
}
__syncthreads();
if (tid == 0)
norm[d] += sum[0];
}
}
if (tid < nfeat)
norm[tid] = norm[tid]*norm[tid];
__syncthreads();
double s = .0;
for (int d = 0; d < nfeat; ++ d)
s += norm[d];
return s;
}
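// calcDist: block-cooperative squared distance between instance i (a row of km1) and
// instance j (a row of km2) in the learned space, i.e. || Omega * (k_i - k_j) ||^2 with
// Omega = O[idx_o] (nfeat x ntrain):
//   d = sum_f ( sum_p Omega[f][p] * (km1[i][p] - km2[j][p]) )^2
// sum[] tree-reduces the ntrain columns one feature at a time and norm[] collects the
// per-feature projections, so the code assumes nfeat <= 64 and a power-of-two block size
// (BSIZE = 256). Every thread of the block returns the same value; callers store it from
// thread 0 only.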
__device__ void calcTargetDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
if (tid == 0)
sub_fval[bid] = .0;
for(int m = bid; m < ntrain * k; m += gridDim.x){
i = m / k;
j = target[m];
double val = calcDist(i, km_train, j, km_train);
if (tid == 0){
dist_target[m] = val;
sub_fval[bid] += val;
}
}
}
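// calcTargetDist caches dist_target[i*k + kk] = d(i, target_kk(i)) under the current
// transform and accumulates the "pull" part of the objective (sum of target distances)
// into sub_fval[blockIdx.x]; calcFval() later folds these partial sums into f_val on top
// of the triplet violations added by update3_2 / update3_3.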
__device__ void updateDist(double *dist, struct Inst * inst1, int height, struct Inst * inst2, int width){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
for (int m = bid; m < height * width; m += gridDim.x){
i = inst1[m / width].ino;
j = inst2[m % width].ino;
double val = calcDist(i, km_train, j, km_train);
if (tid == 0)
dist[m] = val;
}
}
__global__ void update2(){
calcTargetDist();
updateDist(dist1, grouped_inst, typecount[0], grouped_inst + typecount[0], typecount[1]);
}
__device__ int getTarget(int i, int kk){
return target[i * k + kk];
}
__global__ void zeroHinge(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < typecount[0] * typecount[1] * 2 * k; m += size)
hinge_val[m] = .0;
}
__device__ double hinge(double s){
if (s <= -1.0)
return .0;
else if (s >= 0)
return 1.0;
else
return 1 + s;
}
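// hinge() is a clamped ramp used as the triplet weight: 0 once the triplet satisfies the
// margin by at least 1 (s <= -1), 1 while the margin is violated (s >= 0), and linear in
// between, with s = vdist = 1 + d(i, target) - d(i, impostor).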
__device__ void updateTri(int idx1, int idx2, int idx3, double h){
__syncthreads();
for (int p = threadIdx.x; p < ntrain; p += blockDim.x)
t_triplet[p * ntrain + idx1] += h * (getElement(km_train, idx2, p, ntrain) - getElement(km_train, idx3, p, ntrain));
}
__global__ void zeroT_triplet(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < ntrain * ntrain; m += size)
t_triplet[m] = .0;
}
__global__ void update3_2(){
int bid = blockIdx.x;
int i, j, l;
double vdist, h;
if (bid == 0 && threadIdx.x == 0)
f_val = .0;
for (int m = 0; m < typecount[TN] * typecount[TP]; ++ m){
for (int kk = 0; kk < k; ++ kk){
i = grouped_inst[m / typecount[TP]].ino;
l = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
if (label_train[i] == TP)
h *= nu;
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
l = grouped_inst[m / typecount[TP]].ino;
i = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
if (label_train[i] == TP)
h *= nu;
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
}
}
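// update3_2 walks every (negative, positive) pair m taken from grouped_inst (TN/TP are
// class-label codes, presumably from mycommon.h) and, for each of the k targets of the
// anchor, forms the triplet (anchor i, target j, impostor l) with violation
// vdist = 1 + d(i, j) - d(i, l), where d(i, l) = dist1[m]; both endpoints of the pair
// take a turn as anchor. Block 0 / thread 0 accumulates the positive violations into
// f_val, while the block that owns i, j or l (index % gridDim.x == bid) adds the
// hinge-weighted gradient contribution to t_triplet via updateTri; nu up-weights
// triplets whose anchor carries the TP label.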
__global__ void update3_3(){
int bid = blockIdx.x;
int i, j, l;
double vdist, h, *h_addr;
if (bid == 0 && threadIdx.x == 0)
f_val = .0;
for (int m = 0; m < typecount[TN] * typecount[TP]; ++ m){
for (int kk = 0; kk < k; ++ kk){
i = grouped_inst[m / typecount[TP]].ino;
l = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h_addr = hinge_val + m * 2 * k + kk;
h = hinge(vdist);
if (h != (*h_addr)){
if (i % gridDim.x == bid)
updateTri(i, l, j, h - (*h_addr));
if (j % gridDim.x == bid)
updateTri(j, j, i, h - (*h_addr));
if (l % gridDim.x == bid)
updateTri(l, i, l, h - (*h_addr));
}
(*h_addr) = h;
l = grouped_inst[m / typecount[TP]].ino;
i = grouped_inst[typecount[TN] + m % typecount[TP]].ino;
j = getTarget(i, kk);
vdist = 1 + getElement(dist_target, i, kk, k) - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h_addr = hinge_val + m * 2 * k + k + kk;
h = hinge(vdist);
if (h != (*h_addr)){
if (i % gridDim.x == bid)
updateTri(i, l, j, h - (*h_addr));
if (j % gridDim.x == bid)
updateTri(j, j, i, h - (*h_addr));
if (l % gridDim.x == bid)
updateTri(l, i, l, h - (*h_addr));
}
(*h_addr) = h;
}
}
}
__global__ void calcFval(){
if (blockIdx.x == 0 && threadIdx.x == 0)
for (int i = 0; i < gridDim.x; ++ i)
f_val += sub_fval[i];
}
__global__ void updateUpdateTerm(double alpha){
int size = gridDim.x * blockDim.x;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ntrain * ntrain; m += size){
if (m/ntrain == m%ntrain)
t_update[m] = 1 - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
//t_update[m] = 1 - 2 * alpha * ((1-mu) * t_target[m] + mu * t_triplet[m]);
else
t_update[m] = - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
//t_update[m] = - 2 * alpha * ((1-mu) * t_target[m] + mu * t_triplet[m]);
}
}
__global__ void zeroO(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < nfeat * ntrain; m += size)
O[1 - idx_o][m] = .0;
}
__global__ void updateO(){
int tid = threadIdx.x;
int bid_row = blockIdx.x;
int bid_col = blockIdx.y;
__shared__ double o_element[BSIZE];
if (bid_col * BSIZE + tid < ntrain){
for (int start = 0; start < ntrain; start += BSIZE){
int len = min(BSIZE, ntrain - start);
if(tid < len)
o_element[tid] = getElement(O[idx_o], bid_row, start + tid, ntrain);
__syncthreads();
for (int i = 0; i < len; ++ i){
double val = o_element[i] * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
//__syncthreads();
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] += val;
}
}
}
}
__global__ void updateO1(){
int tid = threadIdx.x;
int bid_row = blockIdx.x;
int bid_col = blockIdx.y;
int workingtid = min(BSIZE, ntrain - bid_col * BSIZE);
if (tid < workingtid)
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] = .0;
//__shared__ double o_element[BSIZE];
for (int start = 0; start < ntrain; start += BSIZE){
int len = min(BSIZE, ntrain - start);
//if(tid < len)
// o_element[tid] = getElement(O[idx_o], bid_row, start + tid, ntrain);
//__syncthreads();
for (int i = 0; i < len; ++ i){
if (tid < workingtid){
double val = getElement(O[idx_o], bid_row, start + i, ntrain) * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
//double val = o_element[i] * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
//__syncthreads();
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] += val;
}
}
}
}
__global__ void knnUpdateDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x;
for(int m = bid; m < ntest * ntrain; m += size){
int i = m / ntrain;
int j = m % ntrain;
double d = calcDist(i, km_test, j, km_train);
if (tid == 0){
ino_knn[m] = j;
dist_knn[m] = d;
}
}
}
// launched with # blocks = ntest
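// knnFindNeighbor: one block per test instance. The ntrain candidates are split into one
// chunk per thread; b passes of a bubble-style selection push each thread's b smallest
// distances to the tail of its chunk (smallest last). The main loop then runs nnegibor
// rounds of a tournament min-reduction over the threads' current best candidates
// (dist[] / ino[] / shortest[] in shared memory); thread 0 records the winner's training
// index in neighbor_knn and the winning thread advances to its next-smallest candidate.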
__global__ void knnFindNeighbor(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int len = ntrain / BSIZE;
int start = tid * len;
if (tid < ntrain % BSIZE){
start += tid;
++ len;
}
else
start += ntrain % BSIZE;
__syncthreads();
//int b = min(len, k);
int b = min(len, nnegibor);
for (int i = 0; i < b; ++ i)
for (int j = start; j < start + len - i - 1; ++ j)
if(getElement(dist_knn, bid, j, ntrain) < getElement(dist_knn, bid, j + 1, ntrain)){
double tmp_dist = getElement(dist_knn, bid, j, ntrain);
setElement(dist_knn, bid, j, ntrain, getElement(dist_knn, bid, j + 1, ntrain));
setElement(dist_knn, bid, j + 1, ntrain, tmp_dist);
int tmp_ino = getElementInt(ino_knn, bid, j, ntrain);
setElementInt(ino_knn, bid, j, ntrain, getElementInt(ino_knn, bid, j + 1, ntrain));
setElementInt(ino_knn, bid, j + 1, ntrain, tmp_ino);
}
__syncthreads();
__shared__ double dist[BSIZE];
__shared__ int ino[BSIZE];
__shared__ int shortest[BSIZE];
int p = start + len -1;
//for (int i = 0; i < k; ++ i){
for (int i = 0; i < nnegibor; ++ i){
if (b > 0){
dist[tid] = getElement(dist_knn, bid, p, ntrain);
ino[tid] = getElementInt(ino_knn, bid, p, ntrain);
}
else
dist[tid] = DBL_MAX;
shortest[tid] = tid;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride){
if (dist[tid] > dist[tid + stride]){
dist[tid] = dist[tid + stride];
ino[tid] = ino[tid + stride];
shortest[tid] = shortest[tid + stride];
}
}
stride /= 2;
}
__syncthreads();
if(tid == 0)
//setElementInt(neighbor_knn, bid, i, k, ino[0]);
setElementInt(neighbor_knn, bid, i, nnegibor, ino[0]);
if(tid == shortest[0]){
-- b;
-- p;
}
}
}
__global__ void knnMatching(){
//int ub = ntest * k;
int ub = ntest * nnegibor;
int stride = blockDim.x * gridDim.x;
int idx_test, idx_train;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
//idx_test = m / k;
idx_test = m / nnegibor;
idx_train = neighbor_knn[m];
if (label_test[idx_test] == label_train[idx_train])
neighbor_knn[m] = 1;
else
neighbor_knn[m] = 0;
}
}
// launch with a single block
__global__ void knnAcc(int neiborhood_size){
int tid = threadIdx.x;
int stride = blockDim.x;
if (tid < 4)
hits[tid] = 0;
__shared__ int matched[BSIZE];
matched[tid] = 0;
for (int m = tid; m < ntest; m += stride){
int nsametype = 0;
for (int i = 0; i < neiborhood_size; ++ i)
//nsametype += neighbor_knn[m * k + i];
nsametype += neighbor_knn[m * nnegibor + i];
if (nsametype > neiborhood_size/2){
matched[tid] += 1;
if (label_test[m] == TP)
atomicAdd(&hits[TP], 1);
}
else{
if (label_test[m] == TN)
atomicSub(&hits[TN], 1);
}
}
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
matched[tid] += matched[tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid ==0)
acc_knn = 1.0 * matched[0] / ntest;
}
void deviceInitKernelMatrix(int *trainninst, int *testninst, int *nf, double *traindata, double *testdata){
cudaMemcpyToSymbol(ntrain, trainninst, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(ntest, testninst, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nfeat, nf, sizeof(int), 0, cudaMemcpyHostToDevice);
double *d_train_data, *d_test_data;
cudaMalloc((void **)&d_train_data, sizeof(double) * (*trainninst) * (*nf));
cudaMalloc((void **)&d_test_data, sizeof(double) * (*testninst) * (*nf));
cudaMemcpy(d_train_data, traindata, sizeof(double) * (*trainninst) * (*nf), cudaMemcpyHostToDevice);
cudaMemcpy(d_test_data, testdata, sizeof(double) * (*testninst) * (*nf), cudaMemcpyHostToDevice);
double *d_kernel_matrix_train, *d_kernel_matrix_test;
cudaMalloc((void **)&d_kernel_matrix_train, sizeof(double) * (*trainninst) * (*trainninst));
cudaMemcpyToSymbol(km_train, &d_kernel_matrix_train, sizeof(double*), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_kernel_matrix_test, sizeof(double) * (*testninst) * (*trainninst));
cudaMemcpyToSymbol(km_test, &d_kernel_matrix_test, sizeof(double*), 0, cudaMemcpyHostToDevice);
// Run the event recording
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event) ;
cudaEventCreate(&stop_event) ;
cudaEventRecord(start_event, 0);
calcKM<<<84, 256>>>(d_train_data, d_test_data);
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaFree(d_train_data);
cudaFree(d_test_data);
}
void deviceInitTarget(int *h_target, int trainninst, int *kk, int *nc){
int *d_target;
cudaMalloc((void **)&d_target, sizeof(int) * trainninst * (*kk));
cudaMemcpy(d_target, h_target, sizeof(int) * trainninst * (*kk), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(target, &d_target, sizeof(int*), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(k, kk, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nclass, nc, sizeof(int), 0, cudaMemcpyHostToDevice);
}
void deviceInitLabelTrain(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
cudaMalloc((void **)&d_label, sizeof(short) * ninst);
cudaMemcpy(d_label, label, sizeof(short) * ninst, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(label_train, &d_label, sizeof(short*), 0, cudaMemcpyHostToDevice);
delete[] label;
}
void deviceInitLabelTest(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
cudaMalloc((void **)&d_label, sizeof(short) * ninst);
cudaMemcpy(d_label, label, sizeof(short) * ninst, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(label_test, &d_label, sizeof(short*), 0, cudaMemcpyHostToDevice);
delete[] label;
}
void deviceInitInstList(struct Inst *inst, unsigned *count, unsigned ninst, int nc, int kk){
cudaMemcpyToSymbol(typecount, count, sizeof(unsigned) * 4, 0, cudaMemcpyHostToDevice);
struct Inst *gi[4];
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
gi[i] = (struct Inst *)malloc(sizeof(struct Inst) * count[i]);
}
//int *index = new int[ninst];
int p[4] = {0, 0, 0, 0};
for(int i = 0; i < ninst; ++ i){
int type = inst[i].label;
gi[type][p[type]].ino = inst[i].ino;
gi[type][p[type]].label = inst[i].label;
//index[i] = p[type];
//for(int j = 0; j < inst[i].label; ++ j)
//index[i] += count[j];
++ p[type];
}
struct Inst *d_inst;
cudaMalloc((void **)&d_inst, sizeof(struct Inst) * ninst);
unsigned start = 0;
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
cudaMemcpy(d_inst + start, gi[i], sizeof(struct Inst) * count[i], cudaMemcpyHostToDevice);
start += count[i];
}
cudaMemcpyToSymbol(grouped_inst, &d_inst, sizeof(struct Inst *), 0, cudaMemcpyHostToDevice);
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
free(gi[i]);
}
double *distanceTarget, *distanceMatrix1, *distanceMatrix2, *hinge_array;
cudaMalloc((void **)&distanceTarget, sizeof(double) * ninst * kk);
cudaMemcpyToSymbol(dist_target, &distanceTarget, sizeof(double *), 0, cudaMemcpyHostToDevice);
if (nc == 2){
cudaMalloc((void **)&distanceMatrix1, sizeof(double) * count[0] * count[1]);
cudaMemcpyToSymbol(dist1, &distanceMatrix1, sizeof(double *), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&hinge_array, sizeof(double) * count[0] * count[1] * 2 * kk);
cudaMemcpyToSymbol(hinge_val, &hinge_array, sizeof(double *), 0, cudaMemcpyHostToDevice);
}
else{
cudaMalloc((void **)&distanceMatrix1, sizeof(double) * count[0] * count[3]);
cudaMalloc((void **)&distanceMatrix2, sizeof(double) * count[1] * count[2]);
cudaMemcpyToSymbol(dist1, &distanceMatrix1, sizeof(double *), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(dist2, &distanceMatrix2, sizeof(double *), 0, cudaMemcpyHostToDevice);
}
}
void deviceInitMu(double m, double n){
double local_m = m;
cudaMemcpyToSymbol(mu, &local_m, sizeof(double), 0, cudaMemcpyHostToDevice);
double local_n = n;
cudaMemcpyToSymbol(nu, &local_n, sizeof(double), 0, cudaMemcpyHostToDevice);
}
void deviceInitO(double *o, int size){
double *d_t;
//cout << "double O: " << o[1] << endl;
cudaMalloc((void **)&d_t, sizeof(double) * size);
cudaMemcpy(d_t, o, sizeof(double) * size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(O, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
cudaMalloc((void **)&d_t, sizeof(double) * size);
cudaMemcpyToSymbol(O, &d_t, sizeof(double*), sizeof(double*), cudaMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
}
void deviceInitTargetTerm(double *t, int size){
double *d_t;
cudaMalloc((void **)&d_t, sizeof(double) * size);
cudaMemcpy(d_t, t, sizeof(double) * size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(t_target, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
}
void deviceInitUpdateTerm(int size1, int size2){
double *d_t;
cudaMalloc((void **)&d_t, sizeof(double) * size1);
cudaMemcpyToSymbol(t_update, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_t, sizeof(double) * size2);
cudaMemcpyToSymbol(t_gradient, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
}
void deviceInitTri(int size){
double *t_o;
cudaMalloc((void **)&t_o, sizeof(double) * size);
cudaMemcpyToSymbol(t_triplet, &t_o, sizeof(double*), 0, cudaMemcpyHostToDevice);
}
void deviceInitKnn(int n_train, int n_test, int kk){
double *d_knn;
cudaMalloc((void **)&d_knn, sizeof(double) * n_test * n_train);
cudaMemcpyToSymbol(dist_knn, &d_knn, sizeof(double*), 0, cudaMemcpyHostToDevice);
int* i_knn;
cudaMalloc((void **)&i_knn, sizeof(int) * n_test * n_train);
cudaMemcpyToSymbol(ino_knn, &i_knn, sizeof(int*), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&i_knn, sizeof(int) * n_test * kk);
cudaMemcpyToSymbol(neighbor_knn, &i_knn, sizeof(int*), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nnegibor, &kk, sizeof(int), 0, cudaMemcpyHostToDevice);
}
void kernelTest(int d, int n, int n_test, int kk, double *result, double mu, double alpha, double nu){
double dd[20];
int h_hits[4];
deviceInitKnn(n, n_test, 40);
//double f = DBL_MAX;
double f_old = DBL_MAX;
double min_iter = 0;
double global_max_acc = .0;
unsigned global_max_iter = 0;
//bool reduced = true;
int idx = 1;
//zeroHinge<<<84, 256>>>();
//zeroT_triplet<<<84, 256>>>();
unsigned iter = 0;
while(true){
// Run the event recording
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
cout << endl << "Iter = " << iter << ", K = "<< kk << ", mu =" << mu << ", nu =" << nu << endl;
idx = 1 - idx;
cudaMemcpyToSymbol(idx_o, &idx, sizeof(int), 0, cudaMemcpyHostToDevice);
// update distances to targets(i,j) and between opposing points(i,l)
update2<<<84, 256>>>();
// update t_triplet by calculating vdist of every (i, j, l)
zeroT_triplet<<<84, 256>>>();
update3_2<<<84, 256>>>();
//update3_3<<<84, 256>>>();
// update objective function value
calcFval<<<84, 256>>>();
cudaThreadSynchronize();
cudaMemcpyFromSymbol(&dd[9], f_val, sizeof(double), 0, cudaMemcpyDeviceToHost);
/*
if (dd[9] < f)
alpha *= 1.1;
else
alpha /= 2;
f = dd[9];
*/
cout << "f_val= " << dd[9];
if (dd[9] < f_old){
cout << ", reduced by " << f_old - dd[9] << endl;
f_old = dd[9];
min_iter = iter;
//reduced = true;
alpha *= 1.1;
knnUpdateDist<<<84, BSIZE>>>();
knnFindNeighbor<<<n_test, BSIZE>>>();
knnMatching<<<84, BSIZE>>>();
for (int i = 0; i < 20; ++ i){
knnAcc<<<1, BSIZE>>>(2 * i + 1);
cudaThreadSynchronize();
cudaMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&dd[i], acc_knn, sizeof(double), 0, cudaMemcpyDeviceToHost);
cout << dd[i] << "(" << h_hits[0] << "," << h_hits[1] << "), ";
}
double max_acc = .0;
int max_acc_k = -1;
for (int i = 0; i < 20; ++ i){
if (dd[i] > max_acc){
max_acc = dd[i];
max_acc_k = 2 * i + 1;
}
}
if (max_acc >= global_max_acc&&iter>10){
global_max_acc = max_acc;
global_max_iter = iter;
}
cout << endl << "max acc = " << max_acc << " at k = " << max_acc_k
<< ". global max = " << global_max_acc << " at iter = " << global_max_iter;
}
else{
cout << ", increased by " << dd[9] - f_old;
//reduced = false;
alpha /= 2;
//int idx = iter % 2;
//if (reduced)
idx = 1 - idx;
cudaMemcpyToSymbol(idx_o, &idx, sizeof(int), 0, cudaMemcpyHostToDevice);
update2<<<84, 256>>>();
// update t_triplet by calculating vdist of every (i, j, l)
zeroT_triplet<<<84, 256>>>();
update3_2<<<84, 256>>>();
//update3_3<<<84, 256>>>();
}
cout << endl << "min_f = " << f_old << " at iter " << min_iter << ", alpha = " << alpha << endl;
// t_update = I - 2 * alpha * (t_target + t_triplet)
updateUpdateTerm<<<84, 256>>>(alpha);
// update omega = omega * t_update
zeroO<<<84, 256>>>();
dim3 dimGrid(d, (n - 1) / BSIZE + 1);
dim3 dimBlock(BSIZE);
updateO1<<<dimGrid, dimBlock>>>();
cudaThreadSynchronize();
float time_kernel;
cudaEventRecord(stop_event, 0);
cudaEventElapsedTime(&time_kernel, start_event, stop_event);
cout << "time " << time_kernel/1000 << endl;
++ iter;
//if (iter > 100)
if (alpha < 1e-10)
break;
}
}
/*
cudaEvent_t start_event1, stop_event1;
cudaEventCreate(&start_event1);
cudaEventCreate(&stop_event1);
cudaEventRecord(start_event1, 0);
cudaThreadSynchronize();
float time_kernel1;
cudaEventRecord(stop_event1, 0);
cudaEventElapsedTime(&time_kernel1, start_event1, stop_event1);
cout << "time1 " << time_kernel1/1000 << endl;
*/
|
f2e27cc78f2e75e925332d965a7603a65d6d1d64.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: superresolution
* file: flowlib_gpu_sor.cu
*
*
* implement all functions with ### implement me ### in the function body
\****************************************************************************/
/*
* flowlib_gpu_sor.cu
*
* Created on: Mar 14, 2012
* Author: steinbrf
*/
//#include <flowlib_gpu_sor.hpp>
#include "flowlib.hpp"
#include <auxiliary/cuda_basic.cuh>
#include <linearoperations/linearoperations.cuh>
#include <auxiliary/debug.hpp>
hipChannelFormatDesc flow_sor_float_tex = hipCreateChannelDesc<float>();
texture<float, 2, hipReadModeElementType> tex_flow_sor_I1;
texture<float, 2, hipReadModeElementType> tex_flow_sor_I2;
bool textures_flow_sor_initialized = false;
#define IMAGE_FILTER_METHOD hipFilterModeLinear
#define SF_TEXTURE_OFFSET 0.5f
#define SF_BW 16
#define SF_BH 16
FlowLibGpuSOR::FlowLibGpuSOR(int par_nx, int par_ny):
FlowLib(par_nx,par_ny),FlowLibGpu(par_nx,par_ny),FlowLibSOR(par_nx,par_ny)
{
cuda_malloc2D((void**)&_penDat,_nx,_ny,1,sizeof(float),&_pitchf1);
cuda_malloc2D((void**)&_penReg,_nx,_ny,1,sizeof(float),&_pitchf1);
cuda_malloc2D((void**)&_b1,_nx,_ny,1,sizeof(float),&_pitchf1);
cuda_malloc2D((void**)&_b2,_nx,_ny,1,sizeof(float),&_pitchf1);
}
FlowLibGpuSOR::~FlowLibGpuSOR()
{
if(_penDat) cutilSafeCall(hipFree(_penDat));
if(_penReg) cutilSafeCall(hipFree(_penReg));
if(_b1) cutilSafeCall(hipFree(_b1));
if(_b2) cutilSafeCall(hipFree(_b2));
}
void bind_textures(const float *I1_g, const float *I2_g, int nx, int ny, int pitchf1)
{
tex_flow_sor_I1.addressMode[0] = hipAddressModeClamp;
tex_flow_sor_I1.addressMode[1] = hipAddressModeClamp;
tex_flow_sor_I1.filterMode = IMAGE_FILTER_METHOD ;
tex_flow_sor_I1.normalized = false;
tex_flow_sor_I2.addressMode[0] = hipAddressModeClamp;
tex_flow_sor_I2.addressMode[1] = hipAddressModeClamp;
tex_flow_sor_I2.filterMode = IMAGE_FILTER_METHOD;
tex_flow_sor_I2.normalized = false;
cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I1, I1_g,
&flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) );
cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I2, I2_g,
&flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) );
}
void unbind_textures_flow_sor()
{
cutilSafeCall (hipUnbindTexture(tex_flow_sor_I1));
cutilSafeCall (hipUnbindTexture(tex_flow_sor_I2));
}
void update_textures_flow_sor(const float *I2_resampled_warped_g, int nx_fine, int ny_fine, int pitchf1)
{
cutilSafeCall (hipUnbindTexture(tex_flow_sor_I2));
cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I2, I2_resampled_warped_g,
&flow_sor_float_tex, nx_fine, ny_fine, pitchf1*sizeof(float)) );
}
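// The two input images are accessed through 2D textures: bind_textures() attaches I1 and
// I2 with clamped addressing and IMAGE_FILTER_METHOD (bilinear) filtering, and
// update_textures_flow_sor() rebinds I2 to its resampled/warped version on each pyramid
// level. The kernels below are expected to fetch image values and derivatives through
// tex_flow_sor_I1 / tex_flow_sor_I2, adding SF_TEXTURE_OFFSET (0.5f) for the usual
// half-pixel shift of unnormalised texture coordinates, while flow derivatives come from
// SF_BW x SF_BH shared-memory tiles.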
/**
* @brief Adds one flow field onto another
* @param du_g Horizontal increment
* @param dv_g Vertical increment
* @param u_g Horizontal accumulation
* @param v_g Vertical accumulation
* @param nx Image width
* @param ny Image height
* @param pitchf1 Image pitch for single float images
*/
__global__ void add_flow_fields
(
const float *du_g,
const float *dv_g,
float *u_g,
float *v_g,
int nx,
int ny,
int pitchf1
)
{
// ### Implement Me###
}
/**
* @brief Kernel to compute the penalty values for several
* lagged-diffusivity iterations taking into account pixel sizes for warping.
* Image derivatives are read from texture, flow derivatives from shared memory
* @param u_g Pointer to global device memory for the horizontal
* flow component of the accumulation flow field
* @param v_g Pointer to global device memory for the vertical
* flow component of the accumulation flow field
* @param du_g Pointer to global device memory for the horizontal
* flow component of the increment flow field
* @param dv_g Pointer to global device memory for the vertical
* flow component of the increment flow field
* @param penaltyd_g Pointer to global device memory for data term penalty
* @param penaltyr_g Pointer to global device memory for regularity term
* penalty
* @param nx Image width
* @param ny Image height
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param data_epsilon Smoothing parameter for the TV Penalization of the data
* term
* @param diff_epsilon Smoothing parameter for the TV Penalization of the
* regularity term
* @param pitchf1 Image pitch for single float images
*/
__global__ void sorflow_update_robustifications_warp_tex_shared
(
const float *u_g,
const float *v_g,
const float *du_g,
const float *dv_g,
float *penaltyd_g,
float *penaltyr_g,
int nx,
int ny,
float hx,
float hy,
float data_epsilon,
float diff_epsilon,
int pitchf1
)
{
// ### Implement Me###
}
/**
* @brief Precomputes one value as the sum of all values not depending on the
* current flow increment
* @param u_g Pointer to global device memory for the horizontal
* flow component of the accumulation flow field
* @param v_g Pointer to global device memory for the vertical
* flow component of the accumulation flow field
* @param penaltyd_g Pointer to global device memory for data term penalty
* @param penaltyr_g Pointer to global device memory for regularity term
* penalty
* @param bu_g Pointer to global memory for horizontal result value
* @param bv_g Pointer to global memory for vertical result value
* @param nx Image width
* @param ny Image height
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param lambda Smoothness weight
* @param pitchf1 Image pitch for single float images
*/
__global__ void sorflow_update_righthandside_shared
(
const float *u_g,
const float *v_g,
const float *penaltyd_g,
const float *penaltyr_g,
float *bu_g,
float *bv_g,
int nx,
int ny,
float hx,
float hy,
float lambda,
int pitchf1
)
{
// ### Implement Me###
}
/**
* @brief Kernel to compute one Red-Black-SOR iteration for the nonlinear
* Euler-Lagrange equation taking into account penalty values and pixel
* size for warping
* @param bu_g Right-Hand-Side values for horizontal flow
* @param bv_g Right-Hand-Side values for vertical flow
* @param penaltyd_g Pointer to global device memory holding data term penalization
* @param penaltyr_g Pointer to global device memory holding regularity term
* penalization
* @param du_g Pointer to global device memory for the horizontal
* flow component increment
* @param dv_g Pointer to global device memory for the vertical
* flow component increment
* @param nx Image width
* @param ny Image height
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param lambda Smoothness weight
* @param relaxation Overrelaxation for the SOR-solver
* @param red Parameter deciding whether the red or black fields of a
* checkerboard pattern are being updated
* @param pitchf1 Image pitch for single float images
*/
__global__ void sorflow_nonlinear_warp_sor_shared
(
const float *bu_g,
const float *bv_g,
const float *penaltyd_g,
const float *penaltyr_g,
float *du_g,
float *dv_g,
int nx,
int ny,
float hx,
float hy,
float lambda,
float relaxation,
int red,
int pitchf1
)
{
// ### Implement Me ###
}
/**
* @brief Method that calls the sorflow_nonlinear_warp_sor_shared in a loop,
* with an outer loop for computing the diffisivity values for
* one level of a coarse-to-fine implementation.
* @param u_g Pointer to global device memory for the horizontal
* flow component
* @param v_g Pointer to global device memory for the vertical
* flow component
* @param du_g Pointer to global device memory for the horizontal
* flow component increment
* @param dv_g Pointer to global device memory for the vertical
* flow component increment
* @param bu_g Right-Hand-Side values for horizontal flow
* @param bv_g Right-Hand-Side values for vertical flow
* @param penaltyd_g Pointer to global device memory holding data term penalization
* @param penaltyr_g Pointer to global device memory holding regularity term
* penalization
* @param nx Image width
* @param ny Image height
* @param pitchf1 Image pitch for single float images
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param lambda Smoothness weight
* @param outer_iterations Number of iterations of the penalty computation
* @param inner_iterations Number of iterations for the SOR-solver
* @param relaxation Overrelaxation for the SOR-solver
* @param data_epsilon Smoothing parameter for the TV Penalization of the data
* term
* @param diff_epsilon Smoothing parameter for the TV Penalization of the
* regularity term
*/
void sorflow_gpu_nonlinear_warp_level
(
const float *u_g,
const float *v_g,
float *du_g,
float *dv_g,
float *bu_g,
float *bv_g,
float *penaltyd_g,
float *penaltyr_g,
int nx,
int ny,
int pitchf1,
float hx,
float hy,
float lambda,
float overrelaxation,
int outer_iterations,
int inner_iterations,
float data_epsilon,
float diff_epsilon
)
{
// ### Implement Me ###
}
float FlowLibGpuSOR::computeFlow()
{
// ### Implement Me###
}
|
f2e27cc78f2e75e925332d965a7603a65d6d1d64.cu
|
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: superresolution
* file: flowlib_gpu_sor.cu
*
*
* implement all functions with ### implement me ### in the function body
\****************************************************************************/
/*
* flowlib_gpu_sor.cu
*
* Created on: Mar 14, 2012
* Author: steinbrf
*/
//#include <flowlib_gpu_sor.hpp>
#include "flowlib.hpp"
#include <auxiliary/cuda_basic.cuh>
#include <linearoperations/linearoperations.cuh>
#include <auxiliary/debug.hpp>
cudaChannelFormatDesc flow_sor_float_tex = cudaCreateChannelDesc<float>();
texture<float, 2, cudaReadModeElementType> tex_flow_sor_I1;
texture<float, 2, cudaReadModeElementType> tex_flow_sor_I2;
bool textures_flow_sor_initialized = false;
#define IMAGE_FILTER_METHOD cudaFilterModeLinear
#define SF_TEXTURE_OFFSET 0.5f
#define SF_BW 16
#define SF_BH 16
FlowLibGpuSOR::FlowLibGpuSOR(int par_nx, int par_ny):
FlowLib(par_nx,par_ny),FlowLibGpu(par_nx,par_ny),FlowLibSOR(par_nx,par_ny)
{
cuda_malloc2D((void**)&_penDat,_nx,_ny,1,sizeof(float),&_pitchf1);
cuda_malloc2D((void**)&_penReg,_nx,_ny,1,sizeof(float),&_pitchf1);
cuda_malloc2D((void**)&_b1,_nx,_ny,1,sizeof(float),&_pitchf1);
cuda_malloc2D((void**)&_b2,_nx,_ny,1,sizeof(float),&_pitchf1);
}
FlowLibGpuSOR::~FlowLibGpuSOR()
{
if(_penDat) cutilSafeCall(cudaFree(_penDat));
if(_penReg) cutilSafeCall(cudaFree(_penReg));
if(_b1) cutilSafeCall(cudaFree(_b1));
if(_b2) cutilSafeCall(cudaFree(_b2));
}
void bind_textures(const float *I1_g, const float *I2_g, int nx, int ny, int pitchf1)
{
tex_flow_sor_I1.addressMode[0] = cudaAddressModeClamp;
tex_flow_sor_I1.addressMode[1] = cudaAddressModeClamp;
tex_flow_sor_I1.filterMode = IMAGE_FILTER_METHOD ;
tex_flow_sor_I1.normalized = false;
tex_flow_sor_I2.addressMode[0] = cudaAddressModeClamp;
tex_flow_sor_I2.addressMode[1] = cudaAddressModeClamp;
tex_flow_sor_I2.filterMode = IMAGE_FILTER_METHOD;
tex_flow_sor_I2.normalized = false;
cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I1, I1_g,
&flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) );
cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I2, I2_g,
&flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) );
}
void unbind_textures_flow_sor()
{
cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I1));
cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I2));
}
void update_textures_flow_sor(const float *I2_resampled_warped_g, int nx_fine, int ny_fine, int pitchf1)
{
cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I2));
cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I2, I2_resampled_warped_g,
&flow_sor_float_tex, nx_fine, ny_fine, pitchf1*sizeof(float)) );
}
/**
* @brief Adds one flow field onto another
* @param du_g Horizontal increment
* @param dv_g Vertical increment
* @param u_g Horizontal accumulation
* @param v_g Vertical accumulation
* @param nx Image width
* @param ny Image height
* @param pitchf1 Image pitch for single float images
*/
__global__ void add_flow_fields
(
const float *du_g,
const float *dv_g,
float *u_g,
float *v_g,
int nx,
int ny,
int pitchf1
)
{
// ### Implement Me###
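// One possible minimal body, kept disabled because the exercise above asks for this
// function to be filled in; it assumes a 2D launch whose grid covers nx x ny.
#if 0
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < nx && y < ny) {
const int idx = y * pitchf1 + x;
u_g[idx] += du_g[idx]; // accumulate the horizontal increment
v_g[idx] += dv_g[idx]; // accumulate the vertical increment
}
#endif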
}
/**
* @brief Kernel to compute the penalty values for several
* lagged-diffusivity iterations taking into account pixel sizes for warping.
* Image derivatives are read from texture, flow derivatives from shared memory
* @param u_g Pointer to global device memory for the horizontal
* flow component of the accumulation flow field
* @param v_g Pointer to global device memory for the vertical
* flow component of the accumulation flow field
* @param du_g Pointer to global device memory for the horizontal
* flow component of the increment flow field
* @param dv_g Pointer to global device memory for the vertical
* flow component of the increment flow field
* @param penaltyd_g Pointer to global device memory for data term penalty
* @param penaltyr_g Pointer to global device memory for regularity term
* penalty
* @param nx Image width
* @param ny Image height
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param data_epsilon Smoothing parameter for the TV Penalization of the data
* term
* @param diff_epsilon Smoothing parameter for the TV Penalization of the
* regularity term
* @param pitchf1 Image pitch for single float images
*/
__global__ void sorflow_update_robustifications_warp_tex_shared
(
const float *u_g,
const float *v_g,
const float *du_g,
const float *dv_g,
float *penaltyd_g,
float *penaltyr_g,
int nx,
int ny,
float hx,
float hy,
float data_epsilon,
float diff_epsilon,
int pitchf1
)
{
// ### Implement Me###
}
/**
* @brief Precomputes one value as the sum of all values not depending on the
* current flow increment
* @param u_g Pointer to global device memory for the horizontal
* flow component of the accumulation flow field
* @param v_g Pointer to global device memory for the vertical
* flow component of the accumulation flow field
* @param penaltyd_g Pointer to global device memory for data term penalty
* @param penaltyr_g Pointer to global device memory for regularity term
* penalty
* @param bu_g Pointer to global memory for horizontal result value
* @param bv_g Pointer to global memory for vertical result value
* @param nx Image width
* @param ny Image height
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param lambda Smoothness weight
* @param pitchf1 Image pitch for single float images
*/
__global__ void sorflow_update_righthandside_shared
(
const float *u_g,
const float *v_g,
const float *penaltyd_g,
const float *penaltyr_g,
float *bu_g,
float *bv_g,
int nx,
int ny,
float hx,
float hy,
float lambda,
int pitchf1
)
{
// ### Implement Me###
}
/**
* @brief Kernel to compute one Red-Black-SOR iteration for the nonlinear
* Euler-Lagrange equation taking into account penalty values and pixel
* size for warping
* @param bu_g Right-Hand-Side values for horizontal flow
* @param bv_g Right-Hand-Side values for vertical flow
* @param penaltyd_g Pointer to global device memory holding data term penalization
* @param penaltyr_g Pointer to global device memory holding regularity term
* penalization
* @param du_g Pointer to global device memory for the horizontal
* flow component increment
* @param dv_g Pointer to global device memory for the vertical
* flow component increment
* @param nx Image width
* @param ny Image height
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param lambda Smoothness weight
* @param relaxation Overrelaxation for the SOR-solver
* @param red Parameter deciding whether the red or black fields of a
* checkerboard pattern are being updated
* @param pitchf1 Image pitch for single float images
*/
__global__ void sorflow_nonlinear_warp_sor_shared
(
const float *bu_g,
const float *bv_g,
const float *penaltyd_g,
const float *penaltyr_g,
float *du_g,
float *dv_g,
int nx,
int ny,
float hx,
float hy,
float lambda,
float relaxation,
int red,
int pitchf1
)
{
// ### Implement Me ###
}
/**
* @brief Method that calls the sorflow_nonlinear_warp_sor_shared in a loop,
* with an outer loop for computing the diffusivity values for
* one level of a coarse-to-fine implementation.
* @param u_g Pointer to global device memory for the horizontal
* flow component
* @param v_g Pointer to global device memory for the vertical
* flow component
* @param du_g Pointer to global device memory for the horizontal
* flow component increment
* @param dv_g Pointer to global device memory for the vertical
* flow component increment
* @param bu_g Right-Hand-Side values for horizontal flow
* @param bv_g Right-Hand-Side values for vertical flow
* @param penaltyd_g Pointer to global device memory holding data term penalization
* @param penaltyr_g Pointer to global device memory holding regularity term
* penalization
* @param nx Image width
* @param ny Image height
* @param pitchf1 Image pitch for single float images
* @param hx Horizontal pixel size
* @param hy Vertical pixel size
* @param lambda Smoothness weight
* @param outer_iterations Number of iterations of the penalty computation
* @param inner_iterations Number of iterations for the SOR-solver
* @param relaxation Overrelaxation for the SOR-solver
* @param data_epsilon Smoothing parameter for the TV Penalization of the data
* term
* @param diff_epsilon Smoothing parameter for the TV Penalization of the
* regularity term
*/
void sorflow_gpu_nonlinear_warp_level
(
const float *u_g,
const float *v_g,
float *du_g,
float *dv_g,
float *bu_g,
float *bv_g,
float *penaltyd_g,
float *penaltyr_g,
int nx,
int ny,
int pitchf1,
float hx,
float hy,
float lambda,
float overrelaxation,
int outer_iterations,
int inner_iterations,
float data_epsilon,
float diff_epsilon
)
{
// ### Implement Me ###
}
float FlowLibGpuSOR::computeFlow()
{
// ### Implement Me###
}
|
c7bb9adc3098ef3ea77be695e24ea59c897b7f63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
#include<opencv2/opencv.hpp>
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
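// Element-wise (leaky) ReLU forward pass: out = in for positive inputs and
// negative_slope * in otherwise; with negative_slope == 0, Caffe's default, this reduces
// to the standard ReLU. CUDA_KERNEL_LOOP strides one thread over multiple elements.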
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// std::cout << "relu top bottom = " << (int64_t)top[0] << " " << (int64_t)bottom[0] << "\n";
// std::cout << "relu top bottom data pointers = " << (int64_t)top_data << " " << (int64_t)bottom_data << "\n";
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
//#define SAVE_FEATURES
#ifdef SAVE_FEATURES
if(this->phase_ != TRAIN)
{
static int instance = 0;
int f_map = 0;
int out_count = 0;
for(int l=0;l<top[0]->shape(0);l++)
{
for(int i=0;i<top[0]->shape(1);i++)
{
cv::Mat image = cv::Mat::zeros(top[0]->shape(2),top[0]->shape(3),CV_8UC1);
int count = 0 ;
for(int j=0;j<top[0]->shape(2);j++)
{
for(int k=0;k<top[0]->shape(3);k++)
{
// std::cout << top[0]->cpu_data()[count++] << " ";
// Clamp in int before the uchar store; clamping after the assignment had no effect.
int pixel = static_cast<int>(255 * top[0]->mutable_cpu_data()[out_count++]);
image.data[count] = pixel > 255 ? 255 : (pixel < 0 ? 0 : pixel);
count++;
}
}
std::stringstream im_name;
im_name << "/home/isl-server/ashish/op_feature_maps/" << instance << "_" << f_map << ".png";
cv::imwrite(im_name.str(),image);
f_map++;
}
}
instance++;
}
#endif
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
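  // Gradient of leaky ReLU: 1 for in_data > 0 and negative_slope otherwise,
  // applied as a scale on the incoming top gradient in_diff.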
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
c7bb9adc3098ef3ea77be695e24ea59c897b7f63.cu
|
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
#include<opencv2/opencv.hpp>
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
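  // Leaky ReLU: positive inputs pass through unchanged, non-positive inputs
  // are scaled by negative_slope (negative_slope == 0 gives the plain ReLU).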
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// std::cout << "relu top bottom = " << (int64_t)top[0] << " " << (int64_t)bottom[0] << "\n";
// std::cout << "relu top bottom data pointers = " << (int64_t)top_data << " " << (int64_t)bottom_data << "\n";
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
//#define SAVE_FEATURES
#ifdef SAVE_FEATURES
if(this->phase_ != TRAIN)
{
static int instance = 0;
int f_map = 0;
int out_count = 0;
for(int l=0;l<top[0]->shape(0);l++)
{
for(int i=0;i<top[0]->shape(1);i++)
{
cv::Mat image = cv::Mat::zeros(top[0]->shape(2),top[0]->shape(3),CV_8UC1);
int count = 0 ;
for(int j=0;j<top[0]->shape(2);j++)
{
for(int k=0;k<top[0]->shape(3);k++)
{
// std::cout << top[0]->cpu_data()[count++] << " ";
// Clamp in int before the uchar store; clamping after the assignment had no effect.
int pixel = static_cast<int>(255 * top[0]->mutable_cpu_data()[out_count++]);
image.data[count] = pixel > 255 ? 255 : (pixel < 0 ? 0 : pixel);
count++;
}
}
std::stringstream im_name;
im_name << "/home/isl-server/ashish/op_feature_maps/" << instance << "_" << f_map << ".png";
cv::imwrite(im_name.str(),image);
f_map++;
}
}
instance++;
}
#endif
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
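  // Gradient of leaky ReLU: 1 for in_data > 0 and negative_slope otherwise,
  // applied as a scale on the incoming top gradient in_diff.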
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
c01cee0f567de7f3171d0af94fceda51a547b6da.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @brief
* ragged
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <hipcub/hipcub.hpp>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
namespace {
// will be used in RaggedShape::MaxSize(int32_t axis) to call
// hipcub::DeviceReduce::Max
struct RowSplitsDiff {
const int32_t *row_splits_data;
explicit RowSplitsDiff(const int32_t *row_splits)
: row_splits_data(row_splits) {}
// operator[] and operator+ are required by hipcub::DeviceReduce::Max
__device__ int32_t operator[](int32_t i) const {
return row_splits_data[i + 1] - row_splits_data[i];
}
__device__ RowSplitsDiff operator+(int32_t n) const {
RowSplitsDiff tmp(*this);
tmp.row_splits_data += n;
return tmp;
}
};
} // namespace
namespace std {
// value_type is required by hipcub::DeviceReduce::Max
template <>
struct iterator_traits<::RowSplitsDiff> {
typedef int32_t value_type;
};
} // namespace std
namespace k2 {
// Recursive function that prints (part of) a ragged shape.
// 0 <= begin_pos <= end_pos < shape.TotSize(axis).
void PrintRaggedShapePart(std::ostream &stream, const RaggedShape &shape,
int32_t axis, int32_t begin_pos, int32_t end_pos) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 &&
begin_pos <= end_pos && end_pos <= shape.TotSize(axis));
for (int32_t d = begin_pos; d < end_pos; ++d) {
if (axis == shape.NumAxes() - 1) {
stream << "x ";
} else {
stream << "[ ";
const int32_t *row_splits = shape.RowSplits(axis + 1).Data();
K2_DCHECK(d < shape.RowSplits(axis + 1).Dim());
int32_t row_start = row_splits[d], row_end = row_splits[d + 1];
PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end);
stream << "] ";
}
}
}
// prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values'
// are just the positions in the array, this is for readability.
std::ostream &operator<<(std::ostream &stream, const RaggedShape &shape) {
if (shape.Context()->GetDeviceType() != kCpu) {
return stream << shape.To(GetCpuContext());
} else {
bool print_warnings = false;
if (shape.Validate(print_warnings)) {
stream << "[ ";
PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0());
stream << "]";
return stream;
} else {
// For non-valid shapes, print the raw info.
stream << "Invalid RaggedShape: { ";
stream << " num-axes = " << shape.NumAxes();
for (int32_t i = 1; i < shape.NumAxes(); i++) {
const RaggedShapeLayer &layer = shape.Layers()[i - 1];
if (layer.row_splits.IsValid())
stream << " RowSplits(" << i << ")=" << layer.row_splits;
if (layer.row_ids.IsValid())
stream << "RowIds(" << i << ")=" << layer.row_ids;
stream << "cached_tot_size[" << i << "]=" << layer.cached_tot_size;
}
return stream << " }";
}
}
}
Array1<int32_t> &RaggedShape::RowIds(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
RaggedShapeLayer &rsd = layers_[axis - 1];
auto &row_splits = rsd.row_splits;
auto &row_ids = rsd.row_ids;
// row_splits.Dim() must be >= 1 according to the definition of
// RaggedShapeLayer.
K2_CHECK_GE(row_splits.Dim(), 1);
if (!row_ids.IsValid()) {
if (rsd.cached_tot_size < 0)
rsd.cached_tot_size = row_splits[row_splits.Dim() - 1];
// create row_ids as it does not exist
row_ids = Array1<int32_t>(Context(), rsd.cached_tot_size);
const int32_t *row_splits_data = row_splits.Data();
int32_t *row_ids_data = row_ids.Data();
RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data,
row_ids.Dim(), row_ids_data);
}
return row_ids;
}
int32_t RaggedShape::MaxSize(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
const auto &row_splits = layers_[axis - 1].row_splits;
const int32_t num_rows = row_splits.Dim() - 1;
if (num_rows == 0) return 0;
const int32_t *row_splits_data = row_splits.Data();
ContextPtr c = Context();
if (c->GetDeviceType() == kCpu) {
int32_t max_value = 0;
for (int32_t i = 0; i < num_rows; ++i) {
int32_t value = row_splits_data[i + 1] - row_splits_data[i];
if (value > max_value) max_value = value;
}
return max_value;
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
::RowSplitsDiff row_splits_diff(row_splits_data);
Array1<int32_t> max_array(Context(), 1, 0);
int32_t *max_value = max_array.Data();
size_t temp_storage_bytes = 0;
// the first time is to determine temporary device storage requirements
K2_CUDA_SAFE_CALL(hipcub::DeviceReduce::Max(nullptr, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CUDA_SAFE_CALL(hipcub::DeviceReduce::Max(
d_temp_storage.Data(), temp_storage_bytes, row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
// this will convert to memory on CPU
return max_array[0];
}
}
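// Minimal self-contained sketch (not part of the k2 API; the helper name is
// hypothetical) of the two-phase hipcub::DeviceReduce::Max pattern used in
// MaxSize() above, applied to a plain device array.
inline int32_t MaxOfDeviceArraySketch(const int32_t *d_in, int32_t num_items,
                                      hipStream_t stream) {
  int32_t *d_out = nullptr;
  hipMalloc(&d_out, sizeof(int32_t));
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  // First call with a null workspace only reports the required temp storage.
  hipcub::DeviceReduce::Max(nullptr, temp_bytes, d_in, d_out, num_items,
                            stream);
  hipMalloc(&d_temp, temp_bytes);
  hipcub::DeviceReduce::Max(d_temp, temp_bytes, d_in, d_out, num_items,
                            stream);
  int32_t ans = 0;
  hipMemcpyAsync(&ans, d_out, sizeof(int32_t), hipMemcpyDeviceToHost, stream);
  hipStreamSynchronize(stream);
  hipFree(d_temp);
  hipFree(d_out);
  return ans;
}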
RaggedShape RaggedShape::Index(int32_t axis, int32_t i,
int32_t *value_offset /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
// only support `axis == 0` for now
K2_CHECK_EQ(axis, 0);
K2_CHECK_GE(i, 0);
int32_t num_axes = NumAxes();
K2_CHECK_GT(num_axes, 2);
const auto &src_axes = Layers();
K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim());
if (i == 0 && Dim0() == 1) {
// Just remove first axis. Common case so we make it efficient.
std::vector<RaggedShapeLayer> ans_axes(src_axes.begin() + 1,
src_axes.end());
if (value_offset) *value_offset = 0;
return RaggedShape(ans_axes, false);
}
int32_t idx_begin = (i != 0 ? src_axes[0].row_splits[i] : 0),
idx_end = src_axes[0].row_splits[i + 1];
std::vector<RaggedShapeLayer> axes(src_axes.size() - 1);
ContextPtr &c = Context();
for (int32_t i = 2; i < num_axes; ++i) {
const Array1<int32_t> &src_row_splits = RowSplits(i),
&src_row_ids = RowIds(i);
int32_t idx_begin_next = (idx_begin != 0 ? src_row_splits[idx_begin] : 0),
idx_end_next = src_row_splits[idx_end];
axes[i - 2].row_splits =
src_row_splits.Range(idx_begin, idx_end - idx_begin + 1);
if (idx_begin_next != 0)
axes[i - 2].row_splits = Minus(axes[i - 2].row_splits, idx_begin_next);
axes[i - 2].row_ids =
src_row_ids.Range(idx_begin_next, idx_end_next - idx_begin_next);
if (idx_begin != 0)
axes[i - 2].row_ids = Minus(axes[i - 2].row_ids, idx_begin);
axes[i - 2].cached_tot_size = idx_end_next - idx_begin_next;
idx_begin = idx_begin_next;
idx_end = idx_end_next;
}
if (value_offset) *value_offset = idx_begin;
return RaggedShape(axes);
}
void RaggedShape::Populate() {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = NumAxes();
ParallelRunner pr(this->Context());
for (int32_t i = 1; i < num_axes; ++i) {
With w(pr.NewStream());
// ignore return values of the following calls.
this->TotSize(i);
this->RowIds(i);
}
}
RaggedShape RaggedShape::To(ContextPtr ctx) const {
NVTX_RANGE(K2_FUNC);
if (ctx->IsCompatible(*Context())) return *this;
std::vector<RaggedShapeLayer> axes(layers_.size());
int32_t num_axes = NumAxes();
for (int32_t i = 1; i < num_axes; ++i) {
axes[i - 1].row_splits = layers_[i - 1].row_splits.To(ctx);
// leave row_ids and cached_tot_size unset
axes[i - 1].cached_tot_size = -1;
}
return RaggedShape(axes);
}
RaggedShapeIndexIterator RaggedShape::Iterator() {
return RaggedShapeIndexIterator(*this);
}
int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(static_cast<int32_t>(indexes.size()), NumAxes());
K2_CHECK_EQ(Context()->GetDeviceType(), kCpu);
int32_t cur_idx = indexes[0];
for (int32_t i = 1; i < NumAxes(); i++) {
Array1<int32_t> &row_splits = layers_[i - 1].row_splits;
K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim());
cur_idx = row_splits[cur_idx];
cur_idx += indexes[i];
}
return cur_idx;
}
int32_t RaggedShape::TotSize(int32_t axis) const {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(axis, 0);
K2_CHECK_LT(axis, NumAxes());
if (axis == 0)
return Dim0();
else {
const RaggedShapeLayer &rsd = layers_[axis - 1];
if (rsd.cached_tot_size >= 0) {
return rsd.cached_tot_size;
} else {
// if we had row_ids set up, we should have set cached_tot_size.
K2_CHECK_EQ(rsd.row_ids.Dim(), 0);
K2_CHECK_GT(rsd.row_splits.Dim(), 0);
const_cast<RaggedShapeLayer &>(rsd).cached_tot_size =
rsd.row_splits.Back();
return rsd.cached_tot_size;
}
}
}
// TODO(dan): change this so that on error it prints a warning if
// print_warnings==true, and then returns false.
bool RaggedShape::Validate(bool print_warnings) const {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = Context();
int32_t num_axes = layers_.size();
ParallelRunner pr(c);
for (int32_t axis = 0; axis < num_axes; ++axis) {
With w(pr.NewStream());
const RaggedShapeLayer &rsd = layers_[axis];
K2_CHECK_GE(rsd.row_splits.Dim(), 0);
if (rsd.cached_tot_size >= 0) {
if (!(rsd.row_splits.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_splits.Back())) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_splits.Back()="
<< rsd.row_splits.Back()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
if (!((rsd.row_ids.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_ids.Dim()))) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, row_ids.Dim()="
<< rsd.row_ids.Dim()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
} else {
if (rsd.cached_tot_size != -1 || rsd.row_ids.Dim() != 0) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, cached_tot_size="
<< rsd.cached_tot_size
<< ", row-ids.Dim()=" << rsd.row_ids.Dim();
return false;
}
}
int32_t num_elems;
// Check row_splits.
{
// meta[0] is a bool, ok == 1, not-ok == 0.
// meta[1] will contain the number of row_splits.
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data();
int32_t num_rows = rsd.row_splits.Dim() - 1;
K2_EVAL(
c, num_rows + 1, lambda_check_row_splits, (int32_t i)->void {
int32_t this_idx = row_splits_data[i];
if (i == 0 && this_idx != 0) *ok_data = 0;
if (i < num_rows) {
int32_t next_idx = row_splits_data[i + 1];
if (next_idx < this_idx) *ok_data = 0;
} else {
K2_CHECK(i == num_rows);
*num_elems_data = this_idx;
}
});
meta = meta.To(GetCpuContext());
num_elems = meta[1];
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits;
}
if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits[-1] = " << num_elems
<< " but cached_tot_size == " << rsd.cached_tot_size;
}
}
if (axis + 1 < num_axes) {
int32_t next_num_rows = layers_[axis + 1].row_splits.Dim() - 1;
if (num_elems != next_num_rows) {
K2_LOG(FATAL) << "Ragged shape has num_elems for layers_[" << axis
<< "] == " << num_elems << " and num-rows for layers_["
<< (axis + 1) << "] == " << next_num_rows;
}
}
if (rsd.row_ids.Dim() != 0) { // check row_ids.
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits));
// 1st elem is `ok` (1 or 0); 2nd elem is location of bad index
// into row_splits
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data(),
*row_ids_data = rsd.row_ids.Data();
int32_t num_elems_from_row_ids = rsd.row_ids.Dim(),
num_rows = rsd.row_splits.Dim() - 1;
K2_CHECK_EQ(num_elems, num_elems_from_row_ids);
// TODO: could do this and the other one in separate streams.
K2_EVAL(
c, num_elems, lambda_check_row_ids, (int32_t i)->void {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1]) {
*ok_data = 0;
*bad_index_data = i;
}
});
meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should
// be faster.
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-ids: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits
<< ", row_ids = " << rsd.row_ids << ", see index "
<< meta[1] << " of row_ids, whose dim is "
<< rsd.row_ids.Dim();
}
}
if (axis + 1 < (int32_t)layers_.size()) {
K2_CHECK(IsCompatible(rsd.row_splits, layers_[axis + 1].row_splits));
}
}
return true;
}
bool Equal(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumAxes() != b.NumAxes()) return false;
for (int32_t i = 1; i < a.NumAxes(); i++) {
if (a.RowSplits(i).Dim() != b.RowSplits(i).Dim() ||
!Equal(a.RowSplits(i), b.RowSplits(i)))
return false;
}
return true;
}
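// Accepted syntax example: reading "[ [ x x ] [ x ] [ ] ]" produces a 2-axis
// RaggedShape with Dim0() == 3 and RowSplits(1) == [ 0 2 3 3 ]; each 'x'
// stands for one element, and empty sublists are allowed.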
std::istream &operator>>(std::istream &is, RaggedShape &shape) {
NVTX_RANGE(K2_FUNC);
// Note: element 0 of 'row_splits' will end up being
// discarded; the others will become the axes of `shape`.
std::vector<std::vector<int32_t>> row_splits;
int32_t cur_level = 0, num_elems = 0;
while (1) {
is >> std::ws; // eat whitespace
if (!is.good()) {
is.setstate(std::ios::failbit);
return is;
}
int c = is.get();
if (c == static_cast<int32_t>('[')) {
cur_level++;
while (row_splits.size() < static_cast<size_t>(cur_level)) {
if (num_elems != 0) {
is.setstate(std::ios::failbit);
return is;
}
row_splits.push_back(std::vector<int32_t>(1, 0));
}
} else if (c == static_cast<int32_t>(']')) {
cur_level--;
if (cur_level <= 0) { // Done; return...
if (cur_level < 0) { // ']' without '['.
is.setstate(std::ios::failbit);
return is;
}
row_splits.erase(row_splits.begin());
if (row_splits.empty()) {
// Assume 2 axes even though the num-axes is ambiguous from the input.
// row_splits is 0 0.
row_splits.push_back(std::vector<int32_t>(1, 0));
}
std::vector<RaggedShapeLayer> axes(row_splits.size());
for (size_t i = 0; i < row_splits.size(); i++) {
axes[i].row_splits = Array1<int32_t>(GetCpuContext(), row_splits[i]);
axes[i].cached_tot_size = -1;
}
shape = RaggedShape(axes);
return is;
}
row_splits[cur_level].push_back(
(cur_level + 1 >= (int32_t)row_splits.size())
? num_elems
: (row_splits[cur_level + 1].size() - 1));
} else if (c == static_cast<int32_t>('x')) {
if (cur_level != static_cast<int32_t>(row_splits.size()) ||
cur_level < 2) {
is.setstate(std::ios::failbit);
return is;
}
num_elems++;
} else {
is.setstate(std::ios::failbit);
return is;
}
}
}
} // namespace k2
|
c01cee0f567de7f3171d0af94fceda51a547b6da.cu
|
/**
* @brief
* ragged
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <cub/cub.cuh>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/ragged.h"
namespace {
// will be used in RaggedShape::MaxSize(int32_t axis) to call
// cub::DeviceReduce::Max
struct RowSplitsDiff {
const int32_t *row_splits_data;
explicit RowSplitsDiff(const int32_t *row_splits)
: row_splits_data(row_splits) {}
// operator[] and operator+ are required by cub::DeviceReduce::Max
__device__ int32_t operator[](int32_t i) const {
return row_splits_data[i + 1] - row_splits_data[i];
}
__device__ RowSplitsDiff operator+(int32_t n) const {
RowSplitsDiff tmp(*this);
tmp.row_splits_data += n;
return tmp;
}
};
} // namespace
namespace std {
// value_type is required by cub::DeviceReduce::Max
template <>
struct iterator_traits<::RowSplitsDiff> {
typedef int32_t value_type;
};
} // namespace std
namespace k2 {
// Recursive function that prints (part of) a ragged shape.
// 0 <= begin_pos <= end_pos < shape.TotSize(axis).
void PrintRaggedShapePart(std::ostream &stream, const RaggedShape &shape,
int32_t axis, int32_t begin_pos, int32_t end_pos) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 &&
begin_pos <= end_pos && end_pos <= shape.TotSize(axis));
for (int32_t d = begin_pos; d < end_pos; ++d) {
if (axis == shape.NumAxes() - 1) {
stream << "x ";
} else {
stream << "[ ";
const int32_t *row_splits = shape.RowSplits(axis + 1).Data();
K2_DCHECK(d < shape.RowSplits(axis + 1).Dim());
int32_t row_start = row_splits[d], row_end = row_splits[d + 1];
PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end);
stream << "] ";
}
}
}
// prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values'
// are just the positions in the array, this is for readability.
std::ostream &operator<<(std::ostream &stream, const RaggedShape &shape) {
if (shape.Context()->GetDeviceType() != kCpu) {
return stream << shape.To(GetCpuContext());
} else {
bool print_warnings = false;
if (shape.Validate(print_warnings)) {
stream << "[ ";
PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0());
stream << "]";
return stream;
} else {
// For non-valid shapes, print the raw info.
stream << "Invalid RaggedShape: { ";
stream << " num-axes = " << shape.NumAxes();
for (int32_t i = 1; i < shape.NumAxes(); i++) {
const RaggedShapeLayer &layer = shape.Layers()[i - 1];
if (layer.row_splits.IsValid())
stream << " RowSplits(" << i << ")=" << layer.row_splits;
if (layer.row_ids.IsValid())
stream << "RowIds(" << i << ")=" << layer.row_ids;
stream << "cached_tot_size[" << i << "]=" << layer.cached_tot_size;
}
return stream << " }";
}
}
}
Array1<int32_t> &RaggedShape::RowIds(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
RaggedShapeLayer &rsd = layers_[axis - 1];
auto &row_splits = rsd.row_splits;
auto &row_ids = rsd.row_ids;
// row_splits.Dim() must be >= 1 according to the definition of
// RaggedShapeLayer.
K2_CHECK_GE(row_splits.Dim(), 1);
if (!row_ids.IsValid()) {
if (rsd.cached_tot_size < 0)
rsd.cached_tot_size = row_splits[row_splits.Dim() - 1];
// create row_ids as it does not exist
row_ids = Array1<int32_t>(Context(), rsd.cached_tot_size);
const int32_t *row_splits_data = row_splits.Data();
int32_t *row_ids_data = row_ids.Data();
RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data,
row_ids.Dim(), row_ids_data);
}
return row_ids;
}
int32_t RaggedShape::MaxSize(int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, NumAxes());
const auto &row_splits = layers_[axis - 1].row_splits;
const int32_t num_rows = row_splits.Dim() - 1;
if (num_rows == 0) return 0;
const int32_t *row_splits_data = row_splits.Data();
ContextPtr c = Context();
if (c->GetDeviceType() == kCpu) {
int32_t max_value = 0;
for (int32_t i = 0; i < num_rows; ++i) {
int32_t value = row_splits_data[i + 1] - row_splits_data[i];
if (value > max_value) max_value = value;
}
return max_value;
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
::RowSplitsDiff row_splits_diff(row_splits_data);
Array1<int32_t> max_array(Context(), 1, 0);
int32_t *max_value = max_array.Data();
size_t temp_storage_bytes = 0;
// the first time is to determine temporary device storage requirements
K2_CUDA_SAFE_CALL(cub::DeviceReduce::Max(nullptr, temp_storage_bytes,
row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CUDA_SAFE_CALL(cub::DeviceReduce::Max(
d_temp_storage.Data(), temp_storage_bytes, row_splits_diff, max_value,
num_rows, c->GetCudaStream()));
// this will convert to memory on CPU
return max_array[0];
}
}
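// Minimal self-contained sketch (not part of the k2 API; the helper name is
// hypothetical) of the two-phase cub::DeviceReduce::Max pattern used in
// MaxSize() above, applied to a plain device array.
inline int32_t MaxOfDeviceArraySketch(const int32_t *d_in, int32_t num_items,
                                      cudaStream_t stream) {
  int32_t *d_out = nullptr;
  cudaMalloc(&d_out, sizeof(int32_t));
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  // First call with a null workspace only reports the required temp storage.
  cub::DeviceReduce::Max(nullptr, temp_bytes, d_in, d_out, num_items, stream);
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceReduce::Max(d_temp, temp_bytes, d_in, d_out, num_items, stream);
  int32_t ans = 0;
  cudaMemcpyAsync(&ans, d_out, sizeof(int32_t), cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream);
  cudaFree(d_temp);
  cudaFree(d_out);
  return ans;
}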
RaggedShape RaggedShape::Index(int32_t axis, int32_t i,
int32_t *value_offset /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
// only support `axis == 0` for now
K2_CHECK_EQ(axis, 0);
K2_CHECK_GE(i, 0);
int32_t num_axes = NumAxes();
K2_CHECK_GT(num_axes, 2);
const auto &src_axes = Layers();
K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim());
if (i == 0 && Dim0() == 1) {
// Just remove first axis. Common case so we make it efficient.
std::vector<RaggedShapeLayer> ans_axes(src_axes.begin() + 1,
src_axes.end());
if (value_offset) *value_offset = 0;
return RaggedShape(ans_axes, false);
}
int32_t idx_begin = (i != 0 ? src_axes[0].row_splits[i] : 0),
idx_end = src_axes[0].row_splits[i + 1];
std::vector<RaggedShapeLayer> axes(src_axes.size() - 1);
ContextPtr &c = Context();
for (int32_t i = 2; i < num_axes; ++i) {
const Array1<int32_t> &src_row_splits = RowSplits(i),
&src_row_ids = RowIds(i);
int32_t idx_begin_next = (idx_begin != 0 ? src_row_splits[idx_begin] : 0),
idx_end_next = src_row_splits[idx_end];
axes[i - 2].row_splits =
src_row_splits.Range(idx_begin, idx_end - idx_begin + 1);
if (idx_begin_next != 0)
axes[i - 2].row_splits = Minus(axes[i - 2].row_splits, idx_begin_next);
axes[i - 2].row_ids =
src_row_ids.Range(idx_begin_next, idx_end_next - idx_begin_next);
if (idx_begin != 0)
axes[i - 2].row_ids = Minus(axes[i - 2].row_ids, idx_begin);
axes[i - 2].cached_tot_size = idx_end_next - idx_begin_next;
idx_begin = idx_begin_next;
idx_end = idx_end_next;
}
if (value_offset) *value_offset = idx_begin;
return RaggedShape(axes);
}
void RaggedShape::Populate() {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = NumAxes();
ParallelRunner pr(this->Context());
for (int32_t i = 1; i < num_axes; ++i) {
With w(pr.NewStream());
// ignore return values of the following calls.
this->TotSize(i);
this->RowIds(i);
}
}
RaggedShape RaggedShape::To(ContextPtr ctx) const {
NVTX_RANGE(K2_FUNC);
if (ctx->IsCompatible(*Context())) return *this;
std::vector<RaggedShapeLayer> axes(layers_.size());
int32_t num_axes = NumAxes();
for (int32_t i = 1; i < num_axes; ++i) {
axes[i - 1].row_splits = layers_[i - 1].row_splits.To(ctx);
// leave row_ids and cached_tot_size unset
axes[i - 1].cached_tot_size = -1;
}
return RaggedShape(axes);
}
RaggedShapeIndexIterator RaggedShape::Iterator() {
return RaggedShapeIndexIterator(*this);
}
int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(static_cast<int32_t>(indexes.size()), NumAxes());
K2_CHECK_EQ(Context()->GetDeviceType(), kCpu);
int32_t cur_idx = indexes[0];
for (int32_t i = 1; i < NumAxes(); i++) {
Array1<int32_t> &row_splits = layers_[i - 1].row_splits;
K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim());
cur_idx = row_splits[cur_idx];
cur_idx += indexes[i];
}
return cur_idx;
}
int32_t RaggedShape::TotSize(int32_t axis) const {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(axis, 0);
K2_CHECK_LT(axis, NumAxes());
if (axis == 0)
return Dim0();
else {
const RaggedShapeLayer &rsd = layers_[axis - 1];
if (rsd.cached_tot_size >= 0) {
return rsd.cached_tot_size;
} else {
// if we had row_ids set up, we should have set cached_tot_size.
K2_CHECK_EQ(rsd.row_ids.Dim(), 0);
K2_CHECK_GT(rsd.row_splits.Dim(), 0);
const_cast<RaggedShapeLayer &>(rsd).cached_tot_size =
rsd.row_splits.Back();
return rsd.cached_tot_size;
}
}
}
// TODO(dan): change this so that on error it prints a warning if
// print_warnings==true, and then returns false.
bool RaggedShape::Validate(bool print_warnings) const {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = Context();
int32_t num_axes = layers_.size();
ParallelRunner pr(c);
for (int32_t axis = 0; axis < num_axes; ++axis) {
With w(pr.NewStream());
const RaggedShapeLayer &rsd = layers_[axis];
K2_CHECK_GE(rsd.row_splits.Dim(), 0);
if (rsd.cached_tot_size >= 0) {
if (!(rsd.row_splits.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_splits.Back())) {
if (print_warnings)
K2_LOG(WARNING)
<< "Ragged shape validation failed, row_splits.Back()="
<< rsd.row_splits.Back()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
if (!((rsd.row_ids.Dim() == 0 ||
rsd.cached_tot_size == rsd.row_ids.Dim()))) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, row_ids.Dim()="
<< rsd.row_ids.Dim()
<< " vs. cached-tot-size=" << rsd.cached_tot_size;
return false;
}
} else {
if (rsd.cached_tot_size != -1 || rsd.row_ids.Dim() != 0) {
if (print_warnings)
K2_LOG(WARNING) << "Ragged shape validation failed, cached_tot_size="
<< rsd.cached_tot_size
<< ", row-ids.Dim()=" << rsd.row_ids.Dim();
return false;
}
}
int32_t num_elems;
// Check row_splits.
{
// meta[0] is a bool, ok == 1, not-ok == 0.
// meta[1] will contain the number of row_splits.
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data();
int32_t num_rows = rsd.row_splits.Dim() - 1;
K2_EVAL(
c, num_rows + 1, lambda_check_row_splits, (int32_t i)->void {
int32_t this_idx = row_splits_data[i];
if (i == 0 && this_idx != 0) *ok_data = 0;
if (i < num_rows) {
int32_t next_idx = row_splits_data[i + 1];
if (next_idx < this_idx) *ok_data = 0;
} else {
K2_CHECK(i == num_rows);
*num_elems_data = this_idx;
}
});
meta = meta.To(GetCpuContext());
num_elems = meta[1];
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits;
}
if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) {
K2_LOG(FATAL) << "Problem validating row-splits: for layers_[" << axis
<< "], row_splits[-1] = " << num_elems
<< " but cached_tot_size == " << rsd.cached_tot_size;
}
}
if (axis + 1 < num_axes) {
int32_t next_num_rows = layers_[axis + 1].row_splits.Dim() - 1;
if (num_elems != next_num_rows) {
K2_LOG(FATAL) << "Ragged shape has num_elems for layers_[" << axis
<< "] == " << num_elems << " and num-rows for layers_["
<< (axis + 1) << "] == " << next_num_rows;
}
}
if (rsd.row_ids.Dim() != 0) { // check row_ids.
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits));
// 1st elem is `ok` (1 or 0); 2nd elem is location of bad index
// into row_splits
Array1<int32_t> meta(c, 2, 1);
int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1;
const int32_t *row_splits_data = rsd.row_splits.Data(),
*row_ids_data = rsd.row_ids.Data();
int32_t num_elems_from_row_ids = rsd.row_ids.Dim(),
num_rows = rsd.row_splits.Dim() - 1;
K2_CHECK_EQ(num_elems, num_elems_from_row_ids);
// TODO: could do this and the other one in separate streams.
K2_EVAL(
c, num_elems, lambda_check_row_ids, (int32_t i)->void {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1]) {
*ok_data = 0;
*bad_index_data = i;
}
});
meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should
// be faster.
int32_t ok = meta[0];
if (!ok) {
K2_LOG(FATAL) << "Problem validating row-ids: for layers_[" << axis
<< "], row_splits = " << rsd.row_splits
<< ", row_ids = " << rsd.row_ids << ", see index "
<< meta[1] << " of row_ids, whose dim is "
<< rsd.row_ids.Dim();
}
}
if (axis + 1 < (int32_t)layers_.size()) {
K2_CHECK(IsCompatible(rsd.row_splits, layers_[axis + 1].row_splits));
}
}
return true;
}
bool Equal(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumAxes() != b.NumAxes()) return false;
for (int32_t i = 1; i < a.NumAxes(); i++) {
if (a.RowSplits(i).Dim() != b.RowSplits(i).Dim() ||
!Equal(a.RowSplits(i), b.RowSplits(i)))
return false;
}
return true;
}
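// Accepted syntax example: reading "[ [ x x ] [ x ] [ ] ]" produces a 2-axis
// RaggedShape with Dim0() == 3 and RowSplits(1) == [ 0 2 3 3 ]; each 'x'
// stands for one element, and empty sublists are allowed.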
std::istream &operator>>(std::istream &is, RaggedShape &shape) {
NVTX_RANGE(K2_FUNC);
// Note: element 0 of 'row_splits' will end up being
// discarded; the others will become the axes of `shape`.
std::vector<std::vector<int32_t>> row_splits;
int32_t cur_level = 0, num_elems = 0;
while (1) {
is >> std::ws; // eat whitespace
if (!is.good()) {
is.setstate(std::ios::failbit);
return is;
}
int c = is.get();
if (c == static_cast<int32_t>('[')) {
cur_level++;
while (row_splits.size() < static_cast<size_t>(cur_level)) {
if (num_elems != 0) {
is.setstate(std::ios::failbit);
return is;
}
row_splits.push_back(std::vector<int32_t>(1, 0));
}
} else if (c == static_cast<int32_t>(']')) {
cur_level--;
if (cur_level <= 0) { // Done; return...
if (cur_level < 0) { // ']' without '['.
is.setstate(std::ios::failbit);
return is;
}
row_splits.erase(row_splits.begin());
if (row_splits.empty()) {
// Assume 2 axes even though the num-axes is ambiguous from the input.
// row_splits is 0 0.
row_splits.push_back(std::vector<int32_t>(1, 0));
}
std::vector<RaggedShapeLayer> axes(row_splits.size());
for (size_t i = 0; i < row_splits.size(); i++) {
axes[i].row_splits = Array1<int32_t>(GetCpuContext(), row_splits[i]);
axes[i].cached_tot_size = -1;
}
shape = RaggedShape(axes);
return is;
}
row_splits[cur_level].push_back(
(cur_level + 1 >= (int32_t)row_splits.size())
? num_elems
: (row_splits[cur_level + 1].size() - 1));
} else if (c == static_cast<int32_t>('x')) {
if (cur_level != static_cast<int32_t>(row_splits.size()) ||
cur_level < 2) {
is.setstate(std::ios::failbit);
return is;
}
num_elems++;
} else {
is.setstate(std::ios::failbit);
return is;
}
}
}
} // namespace k2
|
ea1dd302b9cc3abfe78ebd81ae8f57ac6b63a5b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file nnnormalizelp_gpu.cu
// @brief Batch normalization block
// @author Andrea Vedaldi
/*
Copyright (C) 2017 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "nnnormalizelp.hpp"
#include "datacu.hpp"
#include <vector>
#include <algorithm>
// -------------------------------------------------------------------
// Helpers
// -------------------------------------------------------------------
struct GPUVisitPattern
{
size_t normsVolume ;
size_t inputVolume ;
int dims [4] {1,1,1,1} ;
int strides [4] {0,0,0,0} ;
int ndims [4] {1,1,1,1} ;
int nstrides [4] {0,0,0,0} ;
} ;
GPUVisitPattern getGPUVisitPatternForInput(NormalizeLp const & op, vl::Tensor input)
{
// Compute tensor geometry.
int n = input.getNumDimensions() ;
auto inputDimensions = std::vector<size_t>(input.getDimensions(),
input.getDimensions() + n) ;
assert(n <= 4) ; // Todo: relax.
size_t inputVolume = 1 ;
size_t normsVolume = 1 ;
auto dims = std::vector<ptrdiff_t>{} ;
auto steps = std::vector<ptrdiff_t>{} ;
auto ndims = std::vector<ptrdiff_t>{} ;
auto nstrides = std::vector<ptrdiff_t>{} ;
// Find out how to traverse the reduced results as the input is
// scanned from first to last element.
for (int d = 0 ; d < n ; ++d) {
bool squashed =
(find(op.selectedDimensions.begin(), op.selectedDimensions.end(), d) !=
op.selectedDimensions.end()) ;
if (squashed) {
dims.push_back(inputDimensions[d]) ;
steps.push_back(inputVolume) ;
} else {
ndims.push_back(inputDimensions[d]) ;
nstrides.push_back(inputVolume) ;
normsVolume *= inputDimensions[d] ;
}
inputVolume *= inputDimensions[d] ;
}
//cout << steps.size() << " " << inputVolume << endl ;
for (int d = steps.size() ; d < 5 ; ++d) {
steps.push_back(inputVolume) ;
dims.push_back(1) ;
}
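  // Turn the absolute per-level strides into the increments the kernels apply
  // after finishing each nested loop level: having advanced dims[d]*steps[d]
  // positions inside level d, adding the adjusted steps[d+1] lands on the next
  // element of the enclosing level d+1.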
for (int d = 3 ; d >= 0 ; d--) {
steps[d+1] -= steps[d] * dims[d] ;
}
GPUVisitPattern vp ;
vp.inputVolume = inputVolume ;
vp.normsVolume = normsVolume ;
  // dims/steps were padded to five entries above, but the fixed-size arrays in
  // GPUVisitPattern (and the kernels) only use four; copy exactly four.
  std::copy(dims.begin(),dims.begin()+4,vp.dims) ;
  std::copy(steps.begin(),steps.begin()+4,vp.strides) ;
std::copy(ndims.begin(),ndims.end(),vp.ndims) ;
std::copy(nstrides.begin(),nstrides.end(),vp.nstrides) ;
return vp ;
}
template<typename type> __global__ void
computeNorms(type * normsData,
type const * inputData,
type exponent,
type epsilon,
GPUVisitPattern vp)
{
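  // One thread per output norm: accumulate sum_i x_i^exponent over the
  // squashed dimensions, then store (sum + epsilon)^(1/exponent).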
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
normsData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
inputData +=
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
type value = 0 ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
value = value + pow(*inputData, exponent) ;
inputData += vp.strides[0] ;
}
inputData += vp.strides[1] ;
}
inputData += vp.strides[2] ;
}
inputData += vp.strides[3] ;
}
*normsData = pow(value + epsilon, static_cast<type>(1.0)/exponent) ;
}
template<typename type> __global__ void
divideByNorms(type * outputData,
type const * inputData,
type const * normsData,
GPUVisitPattern vp)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
normsData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
int offset =
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
inputData += offset ;
outputData += offset ;
type value = *normsData ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
*outputData = *inputData / value ;
inputData += vp.strides[0] ;
outputData += vp.strides[0] ;
}
inputData += vp.strides[1] ;
outputData += vp.strides[1] ;
}
inputData += vp.strides[2] ;
outputData += vp.strides[2] ;
}
inputData += vp.strides[3] ;
outputData += vp.strides[3] ;
}
}
template<typename type> __global__ void
computeSum(type * scratchData,
type const * inputData,
type const * derOutputData,
GPUVisitPattern vp)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
scratchData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
int offset =
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
inputData += offset ;
derOutputData += offset ;
type value = 0 ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
value += (*inputData) * (*derOutputData) ;
inputData += vp.strides[0] ;
derOutputData += vp.strides[0] ;
}
inputData += vp.strides[1] ;
derOutputData += vp.strides[1] ;
}
inputData += vp.strides[2] ;
derOutputData += vp.strides[2] ;
}
inputData += vp.strides[3] ;
derOutputData += vp.strides[3] ;
}
*scratchData = value ;
}
template<typename type> __global__ void
computeDerInput(type * derInputData,
type const * inputData,
type const * normsData,
type const * derOutputData,
type const * scratchData,
type exponent,
GPUVisitPattern vp)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
normsData += tid ;
scratchData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
int offset =
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
derInputData += offset ;
inputData += offset ;
derOutputData += offset ;
type const nv = *normsData ;
type const sv = *scratchData ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
type iv = *inputData ;
type dov = *derOutputData ;
*derInputData = dov / nv - sv * pow(iv,exponent-1) / pow(nv,exponent+1) ;
derInputData += vp.strides[0] ;
inputData += vp.strides[0] ;
derOutputData += vp.strides[0] ;
}
derInputData += vp.strides[1] ;
inputData += vp.strides[1] ;
derOutputData += vp.strides[1] ;
}
derInputData += vp.strides[2] ;
inputData += vp.strides[2] ;
derOutputData += vp.strides[2] ;
}
derInputData += vp.strides[3] ;
inputData += vp.strides[3] ;
derOutputData += vp.strides[3] ;
}
}
// -------------------------------------------------------------------
// GPU forward
// -------------------------------------------------------------------
template<vl::DataType dataType, bool givenNorms>
struct NormalizeLpForwardGPU
{
vl::ErrorCode operator()(NormalizeLp & op,
Tensor &output,
typename NormAgrument<givenNorms>::type norms,
Tensor const &input)
{
assert(norms || !givenNorms) ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto vp = getGPUVisitPatternForInput(op,input) ;
// Get buffers.
type const * inputData = (type const*)input.getMemory() ;
type * normsData ;
if (norms) {
normsData = (type*)norms.getMemory() ;
}
else {
normsData = (type*)op.context.getWorkspace
(vl::VLDT_GPU, vp.normsVolume * sizeof(type)) ;
}
// Accumulate norms.
if (!givenNorms) {
hipLaunchKernelGGL(( computeNorms<type>)
, dim3(divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
normsData,inputData,op.exponent,op.epsilon,vp) ;
}
// Divide by them.
type * outputData = (type*)output.getMemory() ;
hipLaunchKernelGGL(( divideByNorms<type>)
, dim3(divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
outputData,inputData,normsData,vp) ;
//cout << "n vol " << vp.normsVolume << endl ;
return vl::VLE_Success ;
}
} ;
template<vl::DataType dataType>
struct NormalizeLpForward<vl::VLDT_GPU, dataType>
: public NormalizeLpForwardGPU<dataType,false>
{ } ;
template<vl::DataType dataType>
struct NormalizeLpForwardWithNorms<vl::VLDT_GPU, dataType>
: public NormalizeLpForwardGPU<dataType,true>
{ } ;
// -------------------------------------------------------------------
// GPU backward
// -------------------------------------------------------------------
template<vl::DataType dataType, bool givenNorms>
struct NormalizeLpBackwardGPU
{
vl::ErrorCode operator()(NormalizeLp &op,
Tensor &derInput,
typename NormAgrument<givenNorms>::type norms,
Tensor const &input,
Tensor const& derOutput)
{
assert(norms || !givenNorms) ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto vp = getGPUVisitPatternForInput(op,input) ;
// Get buffers.
size_t workspaceSize = vp.normsVolume * sizeof(type) ;
type const * inputData = (type const*)input.getMemory() ;
type * normsData ;
if (norms) {
normsData = (type*)norms.getMemory() ;
}
else {
normsData = 0 ;
workspaceSize *= 2 ;
}
type * scratchData = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize) ;
if (normsData == NULL) {
normsData = scratchData + vp.normsVolume ;
}
// Accumulate norms.
if (!givenNorms) {
hipLaunchKernelGGL(( computeNorms<type>)
, dim3(divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
normsData,inputData,op.exponent,op.epsilon,vp) ;
}
// Compute sum(derOutput .* input).
type const* derOutputData = (type const*)derOutput.getMemory() ;
hipLaunchKernelGGL(( computeSum<type>)
, dim3(divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
scratchData,inputData,derOutputData,vp) ;
// Compute derInputs.
type * derInputData = (type*)derInput.getMemory() ;
hipLaunchKernelGGL(( computeDerInput<type>)
, dim3(divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derInputData,inputData,normsData,derOutputData,scratchData,op.exponent,vp) ;
return vl::VLE_Success ;
}
} ;
template<vl::DataType dataType>
struct NormalizeLpBackward<vl::VLDT_GPU, dataType>
: public NormalizeLpBackwardGPU<dataType,false>
{ } ;
template<vl::DataType dataType>
struct NormalizeLpBackwardWithNorms<vl::VLDT_GPU, dataType>
: public NormalizeLpBackwardGPU<dataType,true>
{ } ;
|
ea1dd302b9cc3abfe78ebd81ae8f57ac6b63a5b7.cu
|
// @file nnnormalizelp_gpu.cu
// @brief Batch normalization block
// @author Andrea Vedaldi
/*
Copyright (C) 2017 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "nnnormalizelp.hpp"
#include "datacu.hpp"
#include <vector>
#include <algorithm>
// -------------------------------------------------------------------
// Helpers
// -------------------------------------------------------------------
struct GPUVisitPattern
{
size_t normsVolume ;
size_t inputVolume ;
int dims [4] {1,1,1,1} ;
int strides [4] {0,0,0,0} ;
int ndims [4] {1,1,1,1} ;
int nstrides [4] {0,0,0,0} ;
} ;
GPUVisitPattern getGPUVisitPatternForInput(NormalizeLp const & op, vl::Tensor input)
{
// Compute tensor geometry.
int n = input.getNumDimensions() ;
auto inputDimensions = std::vector<size_t>(input.getDimensions(),
input.getDimensions() + n) ;
assert(n <= 4) ; // Todo: relax.
size_t inputVolume = 1 ;
size_t normsVolume = 1 ;
auto dims = std::vector<ptrdiff_t>{} ;
auto steps = std::vector<ptrdiff_t>{} ;
auto ndims = std::vector<ptrdiff_t>{} ;
auto nstrides = std::vector<ptrdiff_t>{} ;
// Find out how to traverse the reduced results as the input is
// scanned from first to last element.
for (int d = 0 ; d < n ; ++d) {
bool squashed =
(find(op.selectedDimensions.begin(), op.selectedDimensions.end(), d) !=
op.selectedDimensions.end()) ;
if (squashed) {
dims.push_back(inputDimensions[d]) ;
steps.push_back(inputVolume) ;
} else {
ndims.push_back(inputDimensions[d]) ;
nstrides.push_back(inputVolume) ;
normsVolume *= inputDimensions[d] ;
}
inputVolume *= inputDimensions[d] ;
}
//cout << steps.size() << " " << inputVolume << endl ;
for (int d = steps.size() ; d < 5 ; ++d) {
steps.push_back(inputVolume) ;
dims.push_back(1) ;
}
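  // Turn the absolute per-level strides into the increments the kernels apply
  // after finishing each nested loop level: having advanced dims[d]*steps[d]
  // positions inside level d, adding the adjusted steps[d+1] lands on the next
  // element of the enclosing level d+1.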
for (int d = 3 ; d >= 0 ; d--) {
steps[d+1] -= steps[d] * dims[d] ;
}
GPUVisitPattern vp ;
vp.inputVolume = inputVolume ;
vp.normsVolume = normsVolume ;
  // dims/steps were padded to five entries above, but the fixed-size arrays in
  // GPUVisitPattern (and the kernels) only use four; copy exactly four.
  std::copy(dims.begin(),dims.begin()+4,vp.dims) ;
  std::copy(steps.begin(),steps.begin()+4,vp.strides) ;
std::copy(ndims.begin(),ndims.end(),vp.ndims) ;
std::copy(nstrides.begin(),nstrides.end(),vp.nstrides) ;
return vp ;
}
template<typename type> __global__ void
computeNorms(type * normsData,
type const * inputData,
type exponent,
type epsilon,
GPUVisitPattern vp)
{
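  // One thread per output norm: accumulate sum_i x_i^exponent over the
  // squashed dimensions, then store (sum + epsilon)^(1/exponent).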
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
normsData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
inputData +=
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
type value = 0 ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
value = value + pow(*inputData, exponent) ;
inputData += vp.strides[0] ;
}
inputData += vp.strides[1] ;
}
inputData += vp.strides[2] ;
}
inputData += vp.strides[3] ;
}
*normsData = pow(value + epsilon, static_cast<type>(1.0)/exponent) ;
}
template<typename type> __global__ void
divideByNorms(type * outputData,
type const * inputData,
type const * normsData,
GPUVisitPattern vp)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
normsData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
int offset =
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
inputData += offset ;
outputData += offset ;
type value = *normsData ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
*outputData = *inputData / value ;
inputData += vp.strides[0] ;
outputData += vp.strides[0] ;
}
inputData += vp.strides[1] ;
outputData += vp.strides[1] ;
}
inputData += vp.strides[2] ;
outputData += vp.strides[2] ;
}
inputData += vp.strides[3] ;
outputData += vp.strides[3] ;
}
}
template<typename type> __global__ void
computeSum(type * scratchData,
type const * inputData,
type const * derOutputData,
GPUVisitPattern vp)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
scratchData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
int offset =
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
inputData += offset ;
derOutputData += offset ;
type value = 0 ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
value += (*inputData) * (*derOutputData) ;
inputData += vp.strides[0] ;
derOutputData += vp.strides[0] ;
}
inputData += vp.strides[1] ;
derOutputData += vp.strides[1] ;
}
inputData += vp.strides[2] ;
derOutputData += vp.strides[2] ;
}
inputData += vp.strides[3] ;
derOutputData += vp.strides[3] ;
}
*scratchData = value ;
}
template<typename type> __global__ void
computeDerInput(type * derInputData,
type const * inputData,
type const * normsData,
type const * derOutputData,
type const * scratchData,
type exponent,
GPUVisitPattern vp)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x ; // global index: the launch uses multiple blocks
if (tid >= vp.normsVolume) { return ; }
normsData += tid ;
scratchData += tid ;
int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ;
int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ;
int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ;
int i3 = tid % vp.ndims[3] ;
int offset =
i0 * vp.nstrides[0] +
i1 * vp.nstrides[1] +
i2 * vp.nstrides[2] +
i3 * vp.nstrides[3] ;
derInputData += offset ;
inputData += offset ;
derOutputData += offset ;
type const nv = *normsData ;
type const sv = *scratchData ;
for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) {
for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) {
for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) {
for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) {
type iv = *inputData ;
type dov = *derOutputData ;
*derInputData = dov / nv - sv * pow(iv,exponent-1) / pow(nv,exponent+1) ;
derInputData += vp.strides[0] ;
inputData += vp.strides[0] ;
derOutputData += vp.strides[0] ;
}
derInputData += vp.strides[1] ;
inputData += vp.strides[1] ;
derOutputData += vp.strides[1] ;
}
derInputData += vp.strides[2] ;
inputData += vp.strides[2] ;
derOutputData += vp.strides[2] ;
}
derInputData += vp.strides[3] ;
inputData += vp.strides[3] ;
derOutputData += vp.strides[3] ;
}
}
// -------------------------------------------------------------------
// GPU forward
// -------------------------------------------------------------------
template<vl::DataType dataType, bool givenNorms>
struct NormalizeLpForwardGPU
{
vl::ErrorCode operator()(NormalizeLp & op,
Tensor &output,
typename NormAgrument<givenNorms>::type norms,
Tensor const &input)
{
assert(norms || !givenNorms) ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto vp = getGPUVisitPatternForInput(op,input) ;
// Get buffers.
type const * inputData = (type const*)input.getMemory() ;
type * normsData ;
if (norms) {
normsData = (type*)norms.getMemory() ;
}
else {
normsData = (type*)op.context.getWorkspace
(vl::VLDT_GPU, vp.normsVolume * sizeof(type)) ;
}
// Accumulate norms.
if (!givenNorms) {
computeNorms<type>
<<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(normsData,inputData,op.exponent,op.epsilon,vp) ;
}
// Divide by them.
type * outputData = (type*)output.getMemory() ;
divideByNorms<type>
<<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(outputData,inputData,normsData,vp) ;
//cout << "n vol " << vp.normsVolume << endl ;
return vl::VLE_Success ;
}
} ;
template<vl::DataType dataType>
struct NormalizeLpForward<vl::VLDT_GPU, dataType>
: public NormalizeLpForwardGPU<dataType,false>
{ } ;
template<vl::DataType dataType>
struct NormalizeLpForwardWithNorms<vl::VLDT_GPU, dataType>
: public NormalizeLpForwardGPU<dataType,true>
{ } ;
// -------------------------------------------------------------------
// GPU backward
// -------------------------------------------------------------------
template<vl::DataType dataType, bool givenNorms>
struct NormalizeLpBackwardGPU
{
vl::ErrorCode operator()(NormalizeLp &op,
Tensor &derInput,
typename NormAgrument<givenNorms>::type norms,
Tensor const &input,
Tensor const& derOutput)
{
assert(norms || !givenNorms) ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto vp = getGPUVisitPatternForInput(op,input) ;
// Get buffers.
size_t workspaceSize = vp.normsVolume * sizeof(type) ;
type const * inputData = (type const*)input.getMemory() ;
type * normsData ;
if (norms) {
normsData = (type*)norms.getMemory() ;
}
else {
normsData = 0 ;
workspaceSize *= 2 ;
}
type * scratchData = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize) ;
if (normsData == NULL) {
normsData = scratchData + vp.normsVolume ;
}
// Accumulate norms.
if (!givenNorms) {
computeNorms<type>
<<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(normsData,inputData,op.exponent,op.epsilon,vp) ;
}
// Compute sum(derOutput .* input).
type const* derOutputData = (type const*)derOutput.getMemory() ;
computeSum<type>
<<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(scratchData,inputData,derOutputData,vp) ;
// Compute derInputs.
type * derInputData = (type*)derInput.getMemory() ;
computeDerInput<type>
<<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derInputData,inputData,normsData,derOutputData,scratchData,op.exponent,vp) ;
return vl::VLE_Success ;
}
} ;
template<vl::DataType dataType>
struct NormalizeLpBackward<vl::VLDT_GPU, dataType>
: public NormalizeLpBackwardGPU<dataType,false>
{ } ;
template<vl::DataType dataType>
struct NormalizeLpBackwardWithNorms<vl::VLDT_GPU, dataType>
: public NormalizeLpBackwardGPU<dataType,true>
{ } ;
|
650093dd7c09af8534ee7f804e9182439f26b55d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include <device_launch_parameters.h>
#include <algorithm>
//-----------------------------------------------------------------------------
__inline__
__device__
int warpReduceSum(int value)
{
for (int offset = warpSize / 2; offset > 0; offset /= 2)
value += __shfl_down(value, offset);
return value;
}
//-----------------------------------------------------------------------------
__inline__
__device__
double warpReduceSum(double value)
{
for (int offset = warpSize / 2; offset > 0; offset /= 2)
value += __shfl_down(value, offset);
return value;
}
//-----------------------------------------------------------------------------
__inline__
__device__
int blockReduceSum(int value)
{
static __shared__ int shared[32];
const int lane = threadIdx.x % warpSize;
const int warpId = threadIdx.x / warpSize;
value = warpReduceSum(value);
if (lane == 0)
shared[warpId] = value;
__syncthreads();
value = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (warpId == 0) value = warpReduceSum(value);
return value;
}
//-----------------------------------------------------------------------------
__inline__
__device__
double blockReduceSum(double value)
{
static __shared__ double shared[32];
const int lane = threadIdx.x % warpSize;
const int warpId = threadIdx.x / warpSize;
value = warpReduceSum(value);
if (lane == 0)
shared[warpId] = value;
__syncthreads();
value = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (warpId == 0) value = warpReduceSum(value);
return value;
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumKernel(int* in, int* out, const int n)
{
int sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
out[blockIdx.x] = sum;
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumWarpAtomicKernel(int* in, int* out, const int n)
{
int sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = warpReduceSum(sum);
if (threadIdx.x % warpSize == 0)
atomicAdd(out, sum);
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumBlockAtomicKernel(int* in, int* out, const int n)
{
int sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
atomicAdd(out, sum);
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumKernel(double* in, double* out, const int n)
{
double sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
out[blockIdx.x] = sum;
}
//-----------------------------------------------------------------------------
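// Host-side two-pass reduction: pass 1 produces one partial sum per block in
// dOut, pass 2 re-runs the same kernel with a single 1024-thread block over
// those (at most 1024) partials, so dOut[0] ends up holding the total. dOut is
// used as both input and output in the second launch; this is safe because
// every element is read before thread 0 overwrites dOut[0] after the block
// barrier.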
template<typename T>
T vec_sum_core(T* vec, const int n)
{
const int size = sizeof(T) * n;
T* dVec;
T* dOut;
hipMalloc(&dVec, size);
hipMemcpy(dVec, vec, size, hipMemcpyHostToDevice);
const int threads = 512;
const int blocks = ::min((n + threads - 1) / threads, 1024);
hipMalloc(&dOut, sizeof(T) * blocks);
deviceReduceSumKernel << <blocks, threads >> > (dVec, dOut, n);
deviceReduceSumKernel << <1, 1024 >> > (dOut, dOut, blocks);
hipDeviceSynchronize();
T sum;
hipMemcpy(&sum, dOut, sizeof(T), hipMemcpyDeviceToHost);
hipFree(dVec);
hipFree(dOut);
return sum;
}
//-----------------------------------------------------------------------------
int vec_sum(int* vec, const int n)
{
return vec_sum_core(vec, n);
}
//-----------------------------------------------------------------------------
int vec_sum_warp_atomic(int* vec, const int n)
{
const int size = sizeof(int) * n;
int* dVec;
int* dOut;
hipMalloc(&dVec, size);
hipMalloc(&dOut, sizeof(int));
hipMemcpy(dVec, vec, size, hipMemcpyHostToDevice);
const int threads = 512;
const int blocks = ::min((n + threads - 1) / threads, 1024);
hipMemsetAsync(dOut, 0, sizeof(int));
hipLaunchKernelGGL(( deviceReduceSumWarpAtomicKernel), dim3(blocks), dim3(threads) , 0, 0, dVec, dOut, n);
hipDeviceSynchronize();
int sum;
hipMemcpy(&sum, dOut, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dVec);
hipFree(dOut);
return sum;
}
//-----------------------------------------------------------------------------
int vec_sum_block_atomic(int* vec, const int n)
{
const int size = sizeof(int) * n;
int* dVec;
int* dOut;
hipMalloc(&dVec, size);
hipMalloc(&dOut, sizeof(int));
hipMemcpy(dVec, vec, size, hipMemcpyHostToDevice);
const int threads = 512;
const int blocks = ::min((n + threads - 1) / threads, 1024);
hipMemsetAsync(dOut, 0, sizeof(int));
hipLaunchKernelGGL(( deviceReduceSumBlockAtomicKernel), dim3(blocks), dim3(threads) , 0, 0, dVec, dOut, n);
hipDeviceSynchronize();
int sum;
hipMemcpy(&sum, dOut, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dVec);
hipFree(dOut);
return sum;
}
//-----------------------------------------------------------------------------
double vec_sum(double* vec, const int n)
{
return vec_sum_core(vec, n);
}
|
650093dd7c09af8534ee7f804e9182439f26b55d.cu
|
#include "kernels.h"
#include <device_launch_parameters.h>
#include <algorithm>
//-----------------------------------------------------------------------------
__inline__
__device__
int warpReduceSum(int value)
{
for (int offset = warpSize / 2; offset > 0; offset /= 2)
value += __shfl_down(value, offset);
return value;
}
//-----------------------------------------------------------------------------
__inline__
__device__
double warpReduceSum(double value)
{
for (int offset = warpSize / 2; offset > 0; offset /= 2)
value += __shfl_down(value, offset);
return value;
}
//-----------------------------------------------------------------------------
__inline__
__device__
int blockReduceSum(int value)
{
static __shared__ int shared[32];
const int lane = threadIdx.x % warpSize;
const int warpId = threadIdx.x / warpSize;
value = warpReduceSum(value);
if (lane == 0)
shared[warpId] = value;
__syncthreads();
value = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (warpId == 0) value = warpReduceSum(value);
return value;
}
//-----------------------------------------------------------------------------
__inline__
__device__
double blockReduceSum(double value)
{
static __shared__ double shared[32];
const int lane = threadIdx.x % warpSize;
const int warpId = threadIdx.x / warpSize;
value = warpReduceSum(value);
if (lane == 0)
shared[warpId] = value;
__syncthreads();
value = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (warpId == 0) value = warpReduceSum(value);
return value;
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumKernel(int* in, int* out, const int n)
{
int sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
out[blockIdx.x] = sum;
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumWarpAtomicKernel(int* in, int* out, const int n)
{
int sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = warpReduceSum(sum);
if (threadIdx.x % warpSize == 0)
atomicAdd(out, sum);
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumBlockAtomicKernel(int* in, int* out, const int n)
{
int sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
atomicAdd(out, sum);
}
//-----------------------------------------------------------------------------
__global__ void deviceReduceSumKernel(double* in, double* out, const int n)
{
double sum = 0;
const int index = blockDim.x * blockIdx.x + threadIdx.x;
const int stride = gridDim.x * blockDim.x;
for (int i = index; i < n; i += stride)
sum += in[i];
sum = blockReduceSum(sum);
if (threadIdx.x == 0)
out[blockIdx.x] = sum;
}
//-----------------------------------------------------------------------------
template<typename T>
T vec_sum_core(T* vec, const int n)
{
const int size = sizeof(T) * n;
T* dVec;
T* dOut;
cudaMalloc(&dVec, size);
cudaMemcpy(dVec, vec, size, cudaMemcpyHostToDevice);
const int threads = 512;
const int blocks = std::min((n + threads - 1) / threads, 1024);
cudaMalloc(&dOut, sizeof(T) * blocks);
deviceReduceSumKernel << <blocks, threads >> > (dVec, dOut, n);
deviceReduceSumKernel << <1, 1024 >> > (dOut, dOut, blocks);
cudaDeviceSynchronize();
T sum;
cudaMemcpy(&sum, dOut, sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(dVec);
cudaFree(dOut);
return sum;
}
//-----------------------------------------------------------------------------
int vec_sum(int* vec, const int n)
{
return vec_sum_core(vec, n);
}
//-----------------------------------------------------------------------------
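// Single-pass alternative to vec_sum: each warp reduces its grid-stride
// partial sums and lane 0 adds the result straight into *dOut with atomicAdd,
// which is why dOut is zeroed with cudaMemsetAsync before the launch. Trades
// the second kernel launch for atomic traffic on a single counter.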
int vec_sum_warp_atomic(int* vec, const int n)
{
const int size = sizeof(int) * n;
int* dVec;
int* dOut;
cudaMalloc(&dVec, size);
cudaMalloc(&dOut, sizeof(int));
cudaMemcpy(dVec, vec, size, cudaMemcpyHostToDevice);
const int threads = 512;
const int blocks = std::min((n + threads - 1) / threads, 1024);
cudaMemsetAsync(dOut, 0, sizeof(int));
deviceReduceSumWarpAtomicKernel<<< blocks, threads >>>(dVec, dOut, n);
cudaDeviceSynchronize();
int sum;
cudaMemcpy(&sum, dOut, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dVec);
cudaFree(dOut);
return sum;
}
//-----------------------------------------------------------------------------
int vec_sum_block_atomic(int* vec, const int n)
{
const int size = sizeof(int) * n;
int* dVec;
int* dOut;
cudaMalloc(&dVec, size);
cudaMalloc(&dOut, sizeof(int));
cudaMemcpy(dVec, vec, size, cudaMemcpyHostToDevice);
const int threads = 512;
const int blocks = std::min((n + threads - 1) / threads, 1024);
cudaMemsetAsync(dOut, 0, sizeof(int));
deviceReduceSumBlockAtomicKernel<<< blocks, threads >>>(dVec, dOut, n);
cudaDeviceSynchronize();
int sum;
cudaMemcpy(&sum, dOut, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dVec);
cudaFree(dOut);
return sum;
}
//-----------------------------------------------------------------------------
double vec_sum(double* vec, const int n)
{
return vec_sum_core(vec, n);
}
|
3b611971e6b7fbd6713cc95df9e73e0fee632eb2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 1
#define TC 8
#define C 32
#define N 32
#define H 7
#define W 7
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
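// Auto-generated direct 3x3 convolution kernel (the default_function_kernel0
// name and the time_tvm measurement in main() suggest it was emitted by a TVM
// schedule). It stages a 32-channel 7x7 input tile and a slice of the weights
// in shared memory and accumulates two output channels per thread.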
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[2];
__shared__ float pad_temp_shared[1568];
__shared__ float kernel_shared[256];
float pad_temp_shared_local[32];
float kernel_shared_local[64];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
}
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
#pragma unroll
for (int rx_outer = 0; rx_outer < 3; ++rx_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 8; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 56)) + (((int)threadIdx.x) * 8)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (ry_outer + (((((int)threadIdx.y) * 8) + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 7)) % 7))) && ((ry_outer + (((((int)threadIdx.y) * 8) + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 7)) % 7)) < 8)) && (1 <= (rx_outer + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)))) && ((rx_outer + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)) < 8)) ? data[((((((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 56)) + (((int)threadIdx.x) * 8)) + (ry_outer * 7)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) + rx_outer) - 8))] : 0.000000e+00f);
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 2; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 2) + ((((((int)threadIdx.y) * 10) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) >> 5)) < 8) {
if (((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 10)) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 256) {
if ((((((int)threadIdx.y) * 10) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 64) {
if (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 10) {
kernel_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 10)) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 576)) + (((int)threadIdx.y) * 90)) + (((int)threadIdx.x) * 18)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 * 9)) + (ry_outer * 3)) + rx_outer))];
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int ax1 = 0; ax1 < 32; ++ax1) {
pad_temp_shared_local[(ax1)] = pad_temp_shared[((((ax1 * 49) + (((int)threadIdx.y) * 7)) + ((int)threadIdx.x)))];
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 32; ++ax11) {
kernel_shared_local[(((ax0 * 32) + ax11))] = kernel_shared[((((((int)threadIdx.z) * 64) + (ax0 * 32)) + ax11))];
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 32; ++rc_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(((ff_c * 32) + rc_inner_inner))]));
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 392) + (((int)threadIdx.z) * 98)) + (ff_inner_inner_inner * 49)) + (((int)threadIdx.y) * 7)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
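// Hand-written tiled kernel: blocks are indexed by (channel tile, row tile)
// via blockIdx.x, the padded TC x (TH+2) x WPAD input tile is staged in
// dynamic shared memory, and threads come in groups of N lanes -- one group
// per output column of the tile, each lane owning one output channel n.
// Partial sums over this block's TC channels are merged across channel-tile
// blocks by the atomicAdd in switch_write_back.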
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,1,4);
dim3 block(7,7,4);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
3b611971e6b7fbd6713cc95df9e73e0fee632eb2.cu
|
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 1
#define TC 8
#define C 32
#define N 32
#define H 7
#define W 7
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[2];
__shared__ float pad_temp_shared[1568];
__shared__ float kernel_shared[256];
float pad_temp_shared_local[32];
float kernel_shared_local[64];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
}
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
#pragma unroll
for (int rx_outer = 0; rx_outer < 3; ++rx_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 8; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[(((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 56)) + (((int)threadIdx.x) * 8)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (ry_outer + (((((int)threadIdx.y) * 8) + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 7)) % 7))) && ((ry_outer + (((((int)threadIdx.y) * 8) + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 7)) % 7)) < 8)) && (1 <= (rx_outer + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)))) && ((rx_outer + (((((int)threadIdx.x) * 8) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)) < 8)) ? data[((((((((((int)threadIdx.z) * 392) + (((int)threadIdx.y) * 56)) + (((int)threadIdx.x) * 8)) + (ry_outer * 7)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) + rx_outer) - 8))] : 0.000000e+00f);
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 2; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 2) + ((((((int)threadIdx.y) * 10) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) >> 5)) < 8) {
if (((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 10)) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 256) {
if ((((((int)threadIdx.y) * 10) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 64) {
if (((((int)threadIdx.x) * 2) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 10) {
kernel_shared[(((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 10)) + (((int)threadIdx.x) * 2)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((((int)blockIdx.z) * 2304) + (((int)threadIdx.z) * 576)) + (((int)threadIdx.y) * 90)) + (((int)threadIdx.x) * 18)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 * 9)) + (ry_outer * 3)) + rx_outer))];
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int ax1 = 0; ax1 < 32; ++ax1) {
pad_temp_shared_local[(ax1)] = pad_temp_shared[((((ax1 * 49) + (((int)threadIdx.y) * 7)) + ((int)threadIdx.x)))];
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 32; ++ax11) {
kernel_shared_local[(((ax0 * 32) + ax11))] = kernel_shared[((((((int)threadIdx.z) * 64) + (ax0 * 32)) + ax11))];
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 32; ++rc_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(((ff_c * 32) + rc_inner_inner))]));
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 392) + (((int)threadIdx.z) * 98)) + (ff_inner_inner_inner * 49)) + (((int)threadIdx.y) * 7)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
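// Benchmark driver: runs the same 3x3 convolution through three cuDNN
// algorithms (implicit GEMM, non-fused Winograd, FFT), the generated
// default_function_kernel0, and the hand-written conv2d kernel, timing each
// with CUDA events. Shapes, times, and the speedup ratios of conv2d over the
// others are appended to a CSV file and printed to stdout; check_diff against
// the cuDNN result is computed but not reported.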
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,1,4);
dim3 block(7,7,4);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
8c873dbe1e4d009d9486257a6d02563d12894d26.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file mpn_cov_gpu.cu
// @brief MPN-COV implementation (GPU)
// @author Jiangtao Xie
// @author Peihua Li
/*
Copyright (C) 2017 Peihua Li and Jiangtao Xie
All rights reserved.
*/
#include "nncov_pool_blas.hpp"
#include "../data.hpp"
#include <math.h>
#include <memory>
#include <cstdlib>
#include <algorithm>
#include <limits>
#include <cassert>
#include "blashelper_gpu.hpp"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x) // We import this Macro function from our Caffe Implementation
inline int
GET_BLOCKS(const int N)
{
return (N + VL_CUDA_NUM_THREADS - 1) / VL_CUDA_NUM_THREADS; // We import this function from our Caffe Implementation
}
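// Usage sketch for the two helpers above, matching the launches later in this
// file: a kernel over n elements runs on GET_BLOCKS(n) blocks of
// VL_CUDA_NUM_THREADS threads and walks its indices with CUDA_KERNEL_LOOP, e.g.
//   hipLaunchKernelGGL(( set_kernel<T>), dim3(GET_BLOCKS(n)),
//                      dim3(VL_CUDA_NUM_THREADS), 0, 0, n, alpha, y);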
template<typename T> __global__ void set_kernel(const ptrdiff_t n, const T alpha, T* y)
{
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template<typename T> void gpuMemset(const ptrdiff_t n, const T alpha, T* y)
{
if(alpha == 0){
hipMemset(y, 0, sizeof(T)*n);
}
hipLaunchKernelGGL(( set_kernel<T>), dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, n , alpha, y);
}
template<typename T> __global__ void init_II_kernel(T* a,
T aux_value,
const ptrdiff_t n)
{
CUDA_KERNEL_LOOP(index,n){
a[index*(n+1)] = a[index*(n+1)] + aux_value;
}
}
template<typename T> __global__ void symmetric_kernel(T const* a,
T* b,
int n)
{
int lda = n,offset;
CUDA_KERNEL_LOOP(i,n){
offset = i;
for(int j = offset;j < n;j ++) {
b[i * lda + j] = (a[i * lda + j] + a[j * lda + i]) / 2.0f;
b[j * lda + i] = b[i * lda + j];
}
}
}
template<typename T> __host__ void
symmetric_gpu(T const* a,
T* b,
ptrdiff_t n)
{
hipLaunchKernelGGL(( symmetric_kernel<T>)
, dim3(GET_BLOCKS(n)),dim3(VL_CUDA_NUM_THREADS), 0, 0, a,b,n);
}
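// Covariance (second-order) pooling. For each of the L slices, the m x n data
// matrix X is mapped to the n x n covariance Sigma = X^T * II * X, where II is
// the symmetric centering matrix built below: every entry is set to -1/m^2 and
// 1/m is added to the diagonal, i.e. II = (1/m) * (I - (1/m) * 1*1^T). The
// result is explicitly symmetrized to absorb floating-point asymmetry.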
namespace vl { namespace impl {
template<typename T,vl::DataType dataType>
struct cov_pool<vl::VLDT_GPU,T,dataType>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height,n = width,L = depth,d;
ptrdiff_t dataOffset;
ptrdiff_t outputOffset;
unsigned int workspaceSize = (unsigned int)(m*m + m*n);
T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T));
T* II = workspace;
T* cov_work = II + m*m;
T aux_I_value= -(T)1 / m / m;
gpuMemset(m*m, aux_I_value, II);
hipLaunchKernelGGL(( init_II_kernel<T>), dim3(GET_BLOCKS(m)),dim3(VL_CUDA_NUM_THREADS), 0, 0, II,(T)1/m,m);
for(d = 0;d < L; d++){ //Covariance
dataOffset = d*m*n;
outputOffset = d*n*n;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::cov(context,
data + dataOffset,
output + outputOffset,II,cov_work,
m,n);
symmetric_gpu(output + outputOffset,output + outputOffset,n);
if(error != VLE_Success) {goto done;}
}
done:
return context.passError(error, __func__);
}
static vl::ErrorCode
backward(Context& context,
T* derData,
T const* data,
T const* derOutput,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height*width,n = depth,L = num,d;
ptrdiff_t derOutputOffset,dataOffset;
ptrdiff_t derDataOffset;
unsigned int workspaceSize = (unsigned int)(m*n + m*m + n*n);
T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T));
T* I_X = workspace;
T* II = I_X + m*n;
T* dLdP = II + m*m;
T aux_I_value= -(T)1 / m / m;
gpuMemset(m*m, aux_I_value, II);
hipLaunchKernelGGL(( init_II_kernel<T>), dim3(GET_BLOCKS(m)),dim3(VL_CUDA_NUM_THREADS), 0, 0, II,(T)1/m,m);
for(d = 0;d < L;d++){
dataOffset = d*m*n;
derOutputOffset = d*n*n;
derDataOffset = d*m*n;
symmetric_gpu(derOutput + derOutputOffset,dLdP,n);
error = vl::impl::blas<vl::VLDT_GPU, dataType>::gemm(context,
'n','n',
m,n,m,
T(1),II,m,
data + dataOffset,m,
T(0),I_X,m);
if(error != vl::VLE_Success) {goto done ;}
error = vl::impl::blas<vl::VLDT_GPU, dataType>::gemm(context,
'n','n',
m,n,n,
T(2),I_X,m,
dLdP,n,
T(0),derData + derDataOffset,m);
if(error != vl::VLE_Success) {goto done ;}
}
done:
return context.passError(error, __func__);
}
};
} }
template struct vl::impl::cov_pool<vl::VLDT_GPU, float,vl::VLDT_Float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::cov_pool<vl::VLDT_GPU, double, vl::VLDT_Double> ;
#endif
|
8c873dbe1e4d009d9486257a6d02563d12894d26.cu
|
// @file mpn_cov_gpu.cu
// @brief MPN-COV implementation (GPU)
// @author Jiangtao Xie
// @author Peihua Li
/*
Copyright (C) 2017 Peihua Li and Jiangtao Xie
All rights reserved.
*/
#include "nncov_pool_blas.hpp"
#include "../data.hpp"
#include <math.h>
#include <memory>
#include <cstdlib>
#include <algorithm>
#include <limits>
#include <cassert>
#include "blashelper_gpu.hpp"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x) // We import this Macro function from our Caffe Implementation
inline int
GET_BLOCKS(const int N)
{
return (N + VL_CUDA_NUM_THREADS - 1) / VL_CUDA_NUM_THREADS; // We import this function from our Caffe Implementation
}
template<typename T> __global__ void set_kernel(const ptrdiff_t n, const T alpha, T* y)
{
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template<typename T> void gpuMemset(const ptrdiff_t n, const T alpha, T* y)
{
if(alpha == 0){
cudaMemset(y, 0, sizeof(T)*n);
}
set_kernel<T><<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(n , alpha, y);
}
template<typename T> __global__ void init_II_kernel(T* a,
T aux_value,
const ptrdiff_t n)
{
CUDA_KERNEL_LOOP(index,n){
a[index*(n+1)] = a[index*(n+1)] + aux_value;
}
}
template<typename T> __global__ void symmetric_kernel(T const* a,
T* b,
int n)
{
int lda = n,offset;
CUDA_KERNEL_LOOP(i,n){
offset = i;
for(int j = offset;j < n;j ++) {
b[i * lda + j] = (a[i * lda + j] + a[j * lda + i]) / 2.0f;
b[j * lda + i] = b[i * lda + j];
}
}
}
template<typename T> __host__ void
symmetric_gpu(T const* a,
T* b,
ptrdiff_t n)
{
symmetric_kernel<T>
<<<GET_BLOCKS(n),VL_CUDA_NUM_THREADS>>>(a,b,n);
}
namespace vl { namespace impl {
template<typename T,vl::DataType dataType>
struct cov_pool<vl::VLDT_GPU,T,dataType>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height,n = width,L = depth,d;
ptrdiff_t dataOffset;
ptrdiff_t outputOffset;
unsigned int workspaceSize = (unsigned int)(m*m + m*n);
T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T));
T* II = workspace;
T* cov_work = II + m*m;
T aux_I_value= -(T)1 / m / m;
gpuMemset(m*m, aux_I_value, II);
init_II_kernel<T><<<GET_BLOCKS(m),VL_CUDA_NUM_THREADS>>>(II,(T)1/m,m);
for(d = 0;d < L; d++){ //Covariance
dataOffset = d*m*n;
outputOffset = d*n*n;
error = vl::impl::blas<vl::VLDT_GPU, dataType>::cov(context,
data + dataOffset,
output + outputOffset,II,cov_work,
m,n);
symmetric_gpu(output + outputOffset,output + outputOffset,n);
if(error != VLE_Success) {goto done;}
}
done:
return context.passError(error, __func__);
}
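        // Backward pass of the covariance pooling above. With
        // Sigma = X^T * II * X and II symmetric,
        // dL/dX = 2 * II * X * sym(dL/dSigma); the code below realizes this as
        // two GEMMs per slice: I_X = II * X (m x n), then
        // derData = 2 * I_X * dLdP, where dLdP is the symmetrized derOutput.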
static vl::ErrorCode
backward(Context& context,
T* derData,
T const* data,
T const* derOutput,
size_t height, size_t width, size_t depth, size_t num)
{
vl::ErrorCode error;
ptrdiff_t m = height*width,n = depth,L = num,d;
ptrdiff_t derOutputOffset,dataOffset;
ptrdiff_t derDataOffset;
unsigned int workspaceSize = (unsigned int)(m*n + m*m + n*n);
T* workspace = (T*)context.getWorkspace(vl::VLDT_GPU , workspaceSize*sizeof(T));
T* I_X = workspace;
T* II = I_X + m*n;
T* dLdP = II + m*m;
T aux_I_value= -(T)1 / m / m;
gpuMemset(m*m, aux_I_value, II);
init_II_kernel<T><<<GET_BLOCKS(m),VL_CUDA_NUM_THREADS>>>(II,(T)1/m,m);
for(d = 0;d < L;d++){
dataOffset = d*m*n;
derOutputOffset = d*n*n;
derDataOffset = d*m*n;
symmetric_gpu(derOutput + derOutputOffset,dLdP,n);
error = vl::impl::blas<vl::VLDT_GPU, dataType>::gemm(context,
'n','n',
m,n,m,
T(1),II,m,
data + dataOffset,m,
T(0),I_X,m);
if(error != vl::VLE_Success) {goto done ;}
error = vl::impl::blas<vl::VLDT_GPU, dataType>::gemm(context,
'n','n',
m,n,n,
T(2),I_X,m,
dLdP,n,
T(0),derData + derDataOffset,m);
if(error != vl::VLE_Success) {goto done ;}
}
done:
return context.passError(error, __func__);
}
};
} }
template struct vl::impl::cov_pool<vl::VLDT_GPU, float,vl::VLDT_Float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::cov_pool<vl::VLDT_GPU, double, vl::VLDT_Double> ;
#endif
|
c4be23b4a1298325f1102c0bd18b63293483f19d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/sparse/gpu/conv.cu.h"
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
#include "paddle/phi/kernels/sparse/gpu/gather_gemm_scatter.h"
#endif
namespace phi {
namespace sparse {
extern size_t workspace_size;
// rulebook[3, rulebook_len]:
//[
// [kernel_index],
// [in_i],
// [out_i],
//]
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
template <typename T, typename IntT>
void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const SparseCooTensor& out,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
const auto& kernel_dims = kernel.dims();
const bool is2D = kernel_dims.size() == 4 ? true : false;
const int kernel_size =
is2D ? kernel_dims[0] * kernel_dims[1]
: kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
const int in_channels = is2D ? kernel_dims[2] : kernel_dims[3];
const int out_channels = is2D ? kernel_dims[3] : kernel_dims[4];
int rulebook_len = 0;
const IntT* rulebook_ptr = phi::funcs::sparse::GetRulebookPtr<IntT>(
out, rulebook, key, &rulebook_len);
const int* counter_ptr = phi::funcs::sparse::GetCounterPtr(out, counter, key);
phi::DenseTensor in_features =
phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
phi::DenseTensor d_x_features =
phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
phi::DenseTensor out_grad_features =
phi::Empty<T>(dev_ctx, {rulebook_len, out_channels});
T* in_features_ptr = in_features.data<T>();
T* d_x_features_ptr = d_x_features.data<T>();
T* out_grad_features_ptr = out_grad_features.data<T>();
*kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel);
T* d_kernel_ptr = kernel_grad->data<T>();
phi::backends::gpu::GpuMemsetAsync(
d_kernel_ptr, 0, sizeof(T) * kernel_grad->numel(), dev_ctx.stream());
int half_kernel_size = kernel_size / 2;
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
T* x_grad_values_ptr = x_grad_values.data<T>();
phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr,
0,
sizeof(T) * x_grad_values.numel(),
dev_ctx.stream());
phi::backends::gpu::GpuMemsetAsync(
d_x_features_ptr, 0, sizeof(T) * d_x_features.numel(), dev_ctx.stream());
phi::Copy<GPUContext>(
dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
std::vector<int> offsets(kernel_size + 1);
int offset = 0, max_count = 0;
for (int i = 0; i < kernel_size; i++) {
offsets[i] = offset;
offset += counter_ptr[i];
if (i < half_kernel_size) {
max_count = ::max(max_count, counter_ptr[i]);
}
}
offsets[kernel_size] = offset;
if (subm) {
phi::funcs::sparse::SubmPreProcess<T, GPUContext>(dev_ctx,
x,
kernel,
out_grad.values(),
in_channels,
out_channels,
half_kernel_size,
kernel_grad,
&x_grad_values);
if (max_count == 0) {
return;
}
}
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1);
DenseTensor unique_value = phi::Empty<int>(
dev_ctx, {static_cast<int>(x_grad->nnz() * kernel_size * 2)});
DenseTensor out_index =
phi::Empty<int>(dev_ctx, {static_cast<int>(x.nnz() * 2)});
int* out_index_ptr = out_index.data<int>();
int* unique_value_ptr = unique_value.data<int>();
phi::backends::gpu::GpuMemsetAsync(
out_index_ptr, 0, sizeof(int) * x.nnz() * 2, dev_ctx.stream());
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
bool cutlass = true;
if (dev_ctx.GetComputeCapability() < 80) cutlass = false;
if (in_channels % 4 != 0 || out_channels % 4 != 0) cutlass = false;
if (std::is_same<T, phi::dtype::float16>::value ||
std::is_same<T, double>::value)
cutlass = false;
if (!std::is_same<IntT, int32_t>::value) cutlass = false;
if (!cutlass) {
#endif
hipLaunchKernelGGL(( GroupIndexsV2), dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), rulebook_len,
x.nnz(),
kernel_size,
offsets[kernel_size / 2],
rulebook_ptr,
out_index_ptr,
unique_value_ptr);
GatherV2<T, IntT>(dev_ctx,
x.values().data<T>(),
out_index_ptr,
unique_value_ptr,
x.nnz(),
kernel_size,
in_channels,
2,
in_features_ptr);
Gather<T, IntT>(dev_ctx,
out_grad.values().data<T>(),
rulebook_ptr + rulebook_len,
rulebook_len,
out_channels,
out_grad_features_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (counter_ptr[i] <= 0 || (subm && i == half_kernel_size)) {
continue;
}
const int M = counter_ptr[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels;
T* tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels;
T* tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels;
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
if (cutlass) {
const IntT* gather_x_indices = rulebook_ptr + offsets[i];
const IntT* scatter_x_indices = rulebook_ptr + offsets[i];
const IntT* gather_out_indices = rulebook_ptr + rulebook_len + offsets[i];
const size_t key = autotune::GenKey(M / features_num_range, N, K);
// call gemm: d_kernel = transpose(x) * out_grad
// (in_channels, n) * (n, out_channels)
static cutlass::device_memory::allocation<uint8_t> workspace(
workspace_size);
GatherGemmScatterDriver<80, true, false>(
dev_ctx,
key,
x.values().data<T>(),
out_grad.values().data<T>(),
tmp_d_kernel_ptr,
tmp_d_kernel_ptr,
in_channels,
out_channels,
counter_ptr[i],
gather_x_indices,
gather_out_indices,
static_cast<const IntT*>(nullptr),
static_cast<const T>(1.0),
static_cast<const T>(0.0),
&workspace);
// call gemm: d_x = out_grad * transpose(kernel)
// (n, out_channels) * (out_channels, in_channels)
GatherGemmScatterDriver<80, false, true>(
dev_ctx,
key,
out_grad.values().data<T>(),
tmp_kernel_ptr,
x_grad_values_ptr,
x_grad_values_ptr,
counter_ptr[i],
in_channels,
out_channels,
gather_out_indices,
static_cast<const IntT*>(nullptr),
scatter_x_indices,
static_cast<const T>(1.0),
static_cast<const T>(1.0),
nullptr);
} else {
#endif
// call gemm: d_kernel = transpose(x) * out_grad
// (in_channels, n) * (n, out_channels)
blas.GEMM(CblasTrans,
CblasNoTrans,
K,
N,
M,
static_cast<T>(1),
tmp_in_ptr,
tmp_out_grad_ptr,
static_cast<T>(0),
tmp_d_kernel_ptr);
// call gemm: d_x = out_grad * transpose(kernel)
// (n, out_channels) * (out_channels, in_channels)
blas.GEMM(CblasNoTrans,
CblasTrans,
M,
K,
N,
static_cast<T>(1),
tmp_out_grad_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_d_x_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
}
// 4. scatter
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
if (!cutlass) {
#endif
phi::funcs::sparse::ScatterV2<T>(dev_ctx,
d_x_features_ptr,
out_index.data<int>(),
unique_value.data<int>(),
x_grad->nnz(),
kernel_size,
in_channels,
2,
x_grad_values_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
}
template <typename T, typename Context>
void Conv3dCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const SparseCooTensor& out,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
out,
rulebook,
counter,
out_grad,
paddings,
dilations,
strides,
groups,
subm,
key,
x_grad,
kernel_grad);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(conv3d_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dCooGradKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
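The .hip file above and the .cu original that follows are identical apart from the mechanical CUDA-to-HIP rewrites, the most visible being the kernel launch: the triple-chevron form becomes a hipLaunchKernelGGL call. A minimal sketch of that mapping on a toy kernel (scale_kernel and launch_scale are illustrative names, not part of the Paddle code):

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, hipStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form (as in the .cu file below):
    //   scale_kernel<<<grid, block, 0, stream>>>(d_x, a, n);
    // HIP form produced by hipify (as in the .hip file above):
    hipLaunchKernelGGL(scale_kernel, grid, block, 0 /*shared mem bytes*/, stream,
                       d_x, a, n);
}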
c4be23b4a1298325f1102c0bd18b63293483f19d.cu
|
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/conv_grad_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/sparse/gpu/conv.cu.h"
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
#include "paddle/phi/kernels/sparse/gpu/gather_gemm_scatter.h"
#endif
namespace phi {
namespace sparse {
extern size_t workspace_size;
// rulebook[3, rulebook_len]:
//[
// [kernel_index],
// [in_i],
// [out_i],
//]
// x_grad = out_grad * transpose(kernel)
// kernel_grad = transpose(x) * out_grad
template <typename T, typename IntT>
void Conv3dCooGradGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const SparseCooTensor& out,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
const auto& kernel_dims = kernel.dims();
const bool is2D = kernel_dims.size() == 4 ? true : false;
const int kernel_size =
is2D ? kernel_dims[0] * kernel_dims[1]
: kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
const int in_channels = is2D ? kernel_dims[2] : kernel_dims[3];
const int out_channels = is2D ? kernel_dims[3] : kernel_dims[4];
int rulebook_len = 0;
const IntT* rulebook_ptr = phi::funcs::sparse::GetRulebookPtr<IntT>(
out, rulebook, key, &rulebook_len);
const int* counter_ptr = phi::funcs::sparse::GetCounterPtr(out, counter, key);
phi::DenseTensor in_features =
phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
phi::DenseTensor d_x_features =
phi::Empty<T>(dev_ctx, {rulebook_len, in_channels});
phi::DenseTensor out_grad_features =
phi::Empty<T>(dev_ctx, {rulebook_len, out_channels});
T* in_features_ptr = in_features.data<T>();
T* d_x_features_ptr = d_x_features.data<T>();
T* out_grad_features_ptr = out_grad_features.data<T>();
*kernel_grad = phi::EmptyLike<T>(dev_ctx, kernel);
T* d_kernel_ptr = kernel_grad->data<T>();
phi::backends::gpu::GpuMemsetAsync(
d_kernel_ptr, 0, sizeof(T) * kernel_grad->numel(), dev_ctx.stream());
int half_kernel_size = kernel_size / 2;
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.indices());
DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.values());
T* x_grad_values_ptr = x_grad_values.data<T>();
phi::backends::gpu::GpuMemsetAsync(x_grad_values_ptr,
0,
sizeof(T) * x_grad_values.numel(),
dev_ctx.stream());
phi::backends::gpu::GpuMemsetAsync(
d_x_features_ptr, 0, sizeof(T) * d_x_features.numel(), dev_ctx.stream());
phi::Copy<GPUContext>(
dev_ctx, x.indices(), dev_ctx.GetPlace(), false, &x_grad_indices);
x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true);
std::vector<int> offsets(kernel_size + 1);
int offset = 0, max_count = 0;
for (int i = 0; i < kernel_size; i++) {
offsets[i] = offset;
offset += counter_ptr[i];
if (i < half_kernel_size) {
max_count = std::max(max_count, counter_ptr[i]);
}
}
offsets[kernel_size] = offset;
if (subm) {
phi::funcs::sparse::SubmPreProcess<T, GPUContext>(dev_ctx,
x,
kernel,
out_grad.values(),
in_channels,
out_channels,
half_kernel_size,
kernel_grad,
&x_grad_values);
if (max_count == 0) {
return;
}
}
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, rulebook_len, 1);
DenseTensor unique_value = phi::Empty<int>(
dev_ctx, {static_cast<int>(x_grad->nnz() * kernel_size * 2)});
DenseTensor out_index =
phi::Empty<int>(dev_ctx, {static_cast<int>(x.nnz() * 2)});
int* out_index_ptr = out_index.data<int>();
int* unique_value_ptr = unique_value.data<int>();
phi::backends::gpu::GpuMemsetAsync(
out_index_ptr, 0, sizeof(int) * x.nnz() * 2, dev_ctx.stream());
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
bool cutlass = true;
if (dev_ctx.GetComputeCapability() < 80) cutlass = false;
if (in_channels % 4 != 0 || out_channels % 4 != 0) cutlass = false;
if (std::is_same<T, phi::dtype::float16>::value ||
std::is_same<T, double>::value)
cutlass = false;
if (!std::is_same<IntT, int32_t>::value) cutlass = false;
if (!cutlass) {
#endif
GroupIndexsV2<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(rulebook_len,
x.nnz(),
kernel_size,
offsets[kernel_size / 2],
rulebook_ptr,
out_index_ptr,
unique_value_ptr);
GatherV2<T, IntT>(dev_ctx,
x.values().data<T>(),
out_index_ptr,
unique_value_ptr,
x.nnz(),
kernel_size,
in_channels,
2,
in_features_ptr);
Gather<T, IntT>(dev_ctx,
out_grad.values().data<T>(),
rulebook_ptr + rulebook_len,
rulebook_len,
out_channels,
out_grad_features_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (counter_ptr[i] <= 0 || (subm && i == half_kernel_size)) {
continue;
}
const int M = counter_ptr[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
T* tmp_out_grad_ptr = out_grad_features_ptr + offsets[i] * out_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * in_channels * out_channels;
T* tmp_d_x_ptr = d_x_features_ptr + offsets[i] * in_channels;
T* tmp_d_kernel_ptr = d_kernel_ptr + i * in_channels * out_channels;
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
if (cutlass) {
const IntT* gather_x_indices = rulebook_ptr + offsets[i];
const IntT* scatter_x_indices = rulebook_ptr + offsets[i];
const IntT* gather_out_indices = rulebook_ptr + rulebook_len + offsets[i];
const size_t key = autotune::GenKey(M / features_num_range, N, K);
// call gemm: d_kernel = transpose(x) * out_grad
// (in_channels, n) * (n, out_channels)
static cutlass::device_memory::allocation<uint8_t> workspace(
workspace_size);
GatherGemmScatterDriver<80, true, false>(
dev_ctx,
key,
x.values().data<T>(),
out_grad.values().data<T>(),
tmp_d_kernel_ptr,
tmp_d_kernel_ptr,
in_channels,
out_channels,
counter_ptr[i],
gather_x_indices,
gather_out_indices,
static_cast<const IntT*>(nullptr),
static_cast<const T>(1.0),
static_cast<const T>(0.0),
&workspace);
// call gemm: d_x = out_grad * transpose(kernel)
// (n, out_channels) * (out_channels, in_channels)
GatherGemmScatterDriver<80, false, true>(
dev_ctx,
key,
out_grad.values().data<T>(),
tmp_kernel_ptr,
x_grad_values_ptr,
x_grad_values_ptr,
counter_ptr[i],
in_channels,
out_channels,
gather_out_indices,
static_cast<const IntT*>(nullptr),
scatter_x_indices,
static_cast<const T>(1.0),
static_cast<const T>(1.0),
nullptr);
} else {
#endif
// call gemm: d_kernel = transpose(x) * out_grad
// (in_channels, n) * (n, out_channels)
blas.GEMM(CblasTrans,
CblasNoTrans,
K,
N,
M,
static_cast<T>(1),
tmp_in_ptr,
tmp_out_grad_ptr,
static_cast<T>(0),
tmp_d_kernel_ptr);
// call gemm: d_x = out_grad * transpose(kernel)
// (n, out_channels) * (out_channels, in_channels)
blas.GEMM(CblasNoTrans,
CblasTrans,
M,
K,
N,
static_cast<T>(1),
tmp_out_grad_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_d_x_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
}
// 4. scatter
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
if (!cutlass) {
#endif
phi::funcs::sparse::ScatterV2<T>(dev_ctx,
d_x_features_ptr,
out_index.data<int>(),
unique_value.data<int>(),
x_grad->nnz(),
kernel_size,
in_channels,
2,
x_grad_values_ptr);
#if defined(PADDLE_WITH_CUTLASS) && SPCONV_WITH_CUTLASS
}
#endif
}
template <typename T, typename Context>
void Conv3dCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const SparseCooTensor& out,
const DenseTensor& rulebook,
const DenseTensor& counter,
const SparseCooTensor& out_grad,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
const std::string& key,
SparseCooTensor* x_grad,
DenseTensor* kernel_grad) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "Conv3dCooGradGPUKernel", ([&] {
Conv3dCooGradGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
out,
rulebook,
counter,
out_grad,
paddings,
dilations,
strides,
groups,
subm,
key,
x_grad,
kernel_grad);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(conv3d_coo_grad,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dCooGradKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
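Both copies of Conv3dCooGradGPUKernel compute, for each kernel offset, the two GEMMs named in the comments: d_kernel = transpose(x) * out_grad and d_x = out_grad * transpose(kernel), over the gathered M = counter[i] rows with K = in_channels and N = out_channels. A CPU reference for those two products (conv_grad_gemms_ref is an illustrative name, and the row-major layout mirrors the {rows, channels} shapes of the gathered buffers):

// M x K gathered input features, M x N gathered output grads, K x N kernel slice.
void conv_grad_gemms_ref(const float* x_g, const float* dout_g, const float* kernel,
                         float* d_kernel /* K x N */, float* d_x /* M x K */,
                         int M, int K, int N) {
    // d_kernel = transpose(x) * out_grad : (K, M) * (M, N)
    for (int k = 0; k < K; ++k)
        for (int n = 0; n < N; ++n) {
            float acc = 0.f;
            for (int m = 0; m < M; ++m) acc += x_g[m * K + k] * dout_g[m * N + n];
            d_kernel[k * N + n] = acc;
        }
    // d_x = out_grad * transpose(kernel) : (M, N) * (N, K)
    for (int m = 0; m < M; ++m)
        for (int k = 0; k < K; ++k) {
            float acc = 0.f;
            for (int n = 0; n < N; ++n) acc += dout_g[m * N + n] * kernel[k * N + n];
            d_x[m * K + k] = acc;
        }
}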
a0a1f220c06f99e3d583ef1cb2cc935fc1f67930.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda/conv_bias/matmul/im2col_nhwc_int8.cuh"
#include "src/cuda/utils.cuh"
namespace {
template <bool flip>
__global__ void im2col_kern(
const int8_t* __restrict src, int8_t* __restrict unrolled, uint32_t N,
uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS, uint32_t OH, uint32_t OW,
uint32_t OC, uint32_t OWS, uint32_t FH, uint32_t FW, uint32_t PH, uint32_t PW,
uint32_t SH, uint32_t SW, uint32_t DH, uint32_t DW, uint32_t LD) {
uint32_t ic = blockIdx.x * 32 + threadIdx.x;
uint32_t ow = blockIdx.y * 4 + threadIdx.y;
uint32_t oh = blockIdx.z * 4 + threadIdx.z;
uint32_t offset = (oh * OW + ow) * LD + ic;
if (ic < IC && ow < OW && oh < OH) {
for (uint32_t fh = 0; fh < FH; ++fh) {
for (size_t fw = 0; fw < FW; ++fw) {
uint32_t ih = -PH + oh * SH + (flip ? FH - fh - 1 : fh) * DH;
uint32_t iw = -PW + ow * SW + (flip ? FW - fw - 1 : fw) * DW;
uint32_t i = offset + (fh * FW + fw) * IC;
if (ih < IH && iw < IW) {
unrolled[i] = src[(ih * IW + iw) * IWS + ic];
} else {
unrolled[i] = 0;
}
}
}
}
}
} // anonymous namespace
void megdnn::cuda::im2col_nhwc_int8(
const int8_t* src, int8_t* unrolled, uint32_t N, uint32_t IH, uint32_t IW,
uint32_t IC, uint32_t IWS, uint32_t OH, uint32_t OW, uint32_t OC, uint32_t OWS,
uint32_t FH, uint32_t FW, uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW,
uint32_t DH, uint32_t DW, uint32_t LD, bool flip, hipStream_t stream) {
dim3 nthreads = dim3(32, 4, 4);
dim3 nblocks = dim3(DIVUP(IC, 32), DIVUP(OW, 4), DIVUP(OH, 4));
void (*kern_ptr)(
const int8_t* __restrict src, int8_t* __restrict unrolled, uint32_t N,
uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS, uint32_t OH,
uint32_t OW, uint32_t OC, uint32_t OWS, uint32_t FH, uint32_t FW,
uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW, uint32_t DH,
uint32_t DW, uint32_t LD);
if (flip) {
kern_ptr = im2col_kern<true>;
} else {
kern_ptr = im2col_kern<false>;
}
for (size_t n = 0; n < N; ++n) {
hipLaunchKernelGGL(( kern_ptr), dim3(nblocks), dim3(nthreads), 0, stream,
src + n * IH * IW * IWS, unrolled + n * OH * OW * LD, N, IH, IW, IC,
IWS, OH, OW, OC, OWS, FH, FW, PH, PW, SH, SW, DH, DW, LD);
}
after_kernel_launch();
}
// vim: syntax=cpp.doxygen
|
a0a1f220c06f99e3d583ef1cb2cc935fc1f67930.cu
|
#include "src/cuda/conv_bias/matmul/im2col_nhwc_int8.cuh"
#include "src/cuda/utils.cuh"
namespace {
template <bool flip>
__global__ void im2col_kern(
const int8_t* __restrict src, int8_t* __restrict unrolled, uint32_t N,
uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS, uint32_t OH, uint32_t OW,
uint32_t OC, uint32_t OWS, uint32_t FH, uint32_t FW, uint32_t PH, uint32_t PW,
uint32_t SH, uint32_t SW, uint32_t DH, uint32_t DW, uint32_t LD) {
uint32_t ic = blockIdx.x * 32 + threadIdx.x;
uint32_t ow = blockIdx.y * 4 + threadIdx.y;
uint32_t oh = blockIdx.z * 4 + threadIdx.z;
uint32_t offset = (oh * OW + ow) * LD + ic;
if (ic < IC && ow < OW && oh < OH) {
for (uint32_t fh = 0; fh < FH; ++fh) {
for (size_t fw = 0; fw < FW; ++fw) {
uint32_t ih = -PH + oh * SH + (flip ? FH - fh - 1 : fh) * DH;
uint32_t iw = -PW + ow * SW + (flip ? FW - fw - 1 : fw) * DW;
uint32_t i = offset + (fh * FW + fw) * IC;
if (ih < IH && iw < IW) {
unrolled[i] = src[(ih * IW + iw) * IWS + ic];
} else {
unrolled[i] = 0;
}
}
}
}
}
} // anonymous namespace
void megdnn::cuda::im2col_nhwc_int8(
const int8_t* src, int8_t* unrolled, uint32_t N, uint32_t IH, uint32_t IW,
uint32_t IC, uint32_t IWS, uint32_t OH, uint32_t OW, uint32_t OC, uint32_t OWS,
uint32_t FH, uint32_t FW, uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW,
uint32_t DH, uint32_t DW, uint32_t LD, bool flip, cudaStream_t stream) {
dim3 nthreads = dim3(32, 4, 4);
dim3 nblocks = dim3(DIVUP(IC, 32), DIVUP(OW, 4), DIVUP(OH, 4));
void (*kern_ptr)(
const int8_t* __restrict src, int8_t* __restrict unrolled, uint32_t N,
uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS, uint32_t OH,
uint32_t OW, uint32_t OC, uint32_t OWS, uint32_t FH, uint32_t FW,
uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW, uint32_t DH,
uint32_t DW, uint32_t LD);
if (flip) {
kern_ptr = im2col_kern<true>;
} else {
kern_ptr = im2col_kern<false>;
}
for (size_t n = 0; n < N; ++n) {
kern_ptr<<<nblocks, nthreads, 0, stream>>>(
src + n * IH * IW * IWS, unrolled + n * OH * OW * LD, N, IH, IW, IC,
IWS, OH, OW, OC, OWS, FH, FW, PH, PW, SH, SW, DH, DW, LD);
}
after_kernel_launch();
}
// vim: syntax=cpp.doxygen
|
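For each (oh, ow, ic) the im2col kernel in the pair above writes the FH*FW patch values, spaced IC apart, into a row of length LD, zero-filling out-of-range taps via the unsigned ih < IH / iw < IW test. A plain CPU reference for one image with the same index math (im2col_nhwc_ref is an illustrative name; no flip):

#include <cstdint>
#include <cstddef>

void im2col_nhwc_ref(const int8_t* src, int8_t* unrolled,
                     uint32_t IH, uint32_t IW, uint32_t IC, uint32_t IWS,
                     uint32_t OH, uint32_t OW, uint32_t FH, uint32_t FW,
                     uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW,
                     uint32_t DH, uint32_t DW, uint32_t LD) {
    for (uint32_t oh = 0; oh < OH; ++oh)
        for (uint32_t ow = 0; ow < OW; ++ow)
            for (uint32_t ic = 0; ic < IC; ++ic) {
                size_t offset = (size_t(oh) * OW + ow) * LD + ic;
                for (uint32_t fh = 0; fh < FH; ++fh)
                    for (uint32_t fw = 0; fw < FW; ++fw) {
                        // ih/iw may wrap below zero; the unsigned compare against
                        // IH/IW rejects both out-of-range and "negative" taps,
                        // exactly as in the kernel.
                        uint32_t ih = oh * SH + fh * DH - PH;
                        uint32_t iw = ow * SW + fw * DW - PW;
                        size_t i = offset + (size_t(fh) * FW + fw) * IC;
                        unrolled[i] = (ih < IH && iw < IW)
                                          ? src[(size_t(ih) * IW + iw) * IWS + ic]
                                          : int8_t(0);
                    }
            }
}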
1932ee2091f1d4a8e93645c870893cc9e841665a.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/scale_shift.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class ActivationOp, std::size_t N>
__global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
ActivationOp activation_op(params);
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = activation_op(vec.data[j]);
v_store(output_vPtr[i], vec);
}
}
template <class T, std::size_t N>
__global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
const index_type c = (i / inner_size) % slope.size();
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
v_store(output_vPtr[i], vec);
}
}
} /* namespace raw */
template <class T, class ActivationOp, std::size_t N> static
void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, params);
}
template <class T, class ActivationOp> static
void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
} else {
launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
}
}
template <class T>
void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
}
template <class T>
void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
}
template <class T>
void tanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanHFunctor<T>>(stream, output, input);
}
template <class T>
void swish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SwishFunctor<T>>(stream, output, input);
}
template <class T>
void mish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, MishFunctor<T>>(stream, output, input);
}
template <class T>
void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SigmoidFunctor<T>>(stream, output, input);
}
template <class T>
void elu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ELUFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void bnll(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, BNLLFunctor<T>>(stream, output, input);
}
template <class T>
void ceil(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CeilFunctor<T>>(stream, output, input);
}
template <class T>
void floor(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, FloorFunctor<T>>(stream, output, input);
}
template <class T>
void log(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, LogFunctor<T>>(stream, output, input);
}
template <class T>
void rint(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, RintFunctor<T>>(stream, output, input);
}
template <class T>
void sqrt(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SqrtFunctor<T>>(stream, output, input);
}
template <class T>
void not_k(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, NotFunctor<T>>(stream, output, input);
}
template <class T>
void acos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcosFunctor<T>>(stream, output, input);
}
template <class T>
void acosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcoshFunctor<T>>(stream, output, input);
}
template <class T>
void asin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinFunctor<T>>(stream, output, input);
}
template <class T>
void asinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinhFunctor<T>>(stream, output, input);
}
template <class T>
void atan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanFunctor<T>>(stream, output, input);
}
template <class T>
void atanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanhFunctor<T>>(stream, output, input);
}
template <class T>
void cos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CosFunctor<T>>(stream, output, input);
}
template <class T>
void cosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CoshFunctor<T>>(stream, output, input);
}
template <class T>
void erf(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ErfFunctor<T>>(stream, output, input);
}
template <class T>
void hardswish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, HardSwishFunctor<T>>(stream, output, input);
}
template <class T>
void sin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinFunctor<T>>(stream, output, input);
}
template <class T>
void sinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinhFunctor<T>>(stream, output, input);
}
template <class T>
void softplus(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftplusFunctor<T>>(stream, output, input);
}
template <class T>
void softsign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftsignFunctor<T>>(stream, output, input);
}
template <class T>
void tan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanFunctor<T>>(stream, output, input);
}
template <class T>
void celu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, CeluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void hardsigmoid(const Stream& stream, Span<T> output, View<T> input, T alpha, T beta) {
generic_op<T, HardSigmoidFunctor<T>>(stream, output, input, {alpha, beta});
}
template <class T>
void selu(const Stream& stream, Span<T> output, View<T> input, T alpha, T gamma) {
generic_op<T, SeluFunctor<T>>(stream, output, input, {alpha, gamma});
}
template <class T>
void sign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SignFunctor<T>>(stream, output, input);
}
template <class T>
void shrink(const Stream& stream, Span<T> output, View<T> input, T bias, T lambd) {
generic_op<T, ShrinkFunctor<T>>(stream, output, input, {bias, lambd});
}
template <class T>
void reciprocal(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SignFunctor<T>>(stream, output, input);
}
template <class T>
void thresholdedrelu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ThresholdedReluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void abs(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AbsFunctor<T>>(stream, output, input);
}
template <class T>
void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
CV_Assert(input.size() == output.size());
if (static_cast<float>(exp) == 1.0f) {
scale1_with_bias1(stream, output, input, scale, shift);
return;
}
generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
}
template <class T>
void exp(const Stream& stream, Span<T> output, View<T> input, T normScale, T normShift) {
generic_op<T, ExpFunctor<T>>(stream, output, input, {normScale, normShift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
template void swish<__half>(const Stream&, Span<__half>, View<__half>);
template void mish<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
template void elu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
template void ceil<__half>(const Stream&, Span<__half>, View<__half>);
template void floor<__half>(const Stream&, Span<__half>, View<__half>);
template void log<__half>(const Stream&, Span<__half>, View<__half>);
template void rint<__half>(const Stream&, Span<__half>, View<__half>);
template void sqrt<__half>(const Stream&, Span<__half>, View<__half>);
template void not_k<__half>(const Stream&, Span<__half>, View<__half>);
template void acos<__half>(const Stream&, Span<__half>, View<__half>);
template void acosh<__half>(const Stream&, Span<__half>, View<__half>);
template void asin<__half>(const Stream&, Span<__half>, View<__half>);
template void asinh<__half>(const Stream&, Span<__half>, View<__half>);
template void atan<__half>(const Stream&, Span<__half>, View<__half>);
template void atanh<__half>(const Stream&, Span<__half>, View<__half>);
template void cos<__half>(const Stream&, Span<__half>, View<__half>);
template void cosh<__half>(const Stream&, Span<__half>, View<__half>);
template void erf<__half>(const Stream&, Span<__half>, View<__half>);
template void hardswish<__half>(const Stream&, Span<__half>, View<__half>);
template void sin<__half>(const Stream&, Span<__half>, View<__half>);
template void sinh<__half>(const Stream&, Span<__half>, View<__half>);
template void softplus<__half>(const Stream&, Span<__half>, View<__half>);
template void softsign<__half>(const Stream&, Span<__half>, View<__half>);
template void tan<__half>(const Stream&, Span<__half>, View<__half>);
template void celu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void hardsigmoid<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void selu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void thresholdedrelu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
template void exp<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void sign<__half>(const Stream&, Span<__half>, View<__half>);
template void shrink<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void reciprocal<__half>(const Stream&, Span<__half>, View<__half>);
#endif
template void relu<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh<float>(const Stream&, Span<float>, View<float>);
template void swish<float>(const Stream&, Span<float>, View<float>);
template void mish<float>(const Stream&, Span<float>, View<float>);
template void sigmoid<float>(const Stream&, Span<float>, View<float>);
template void elu<float>(const Stream&, Span<float>, View<float>, float);
template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
template void bnll<float>(const Stream&, Span<float>, View<float>);
template void ceil<float>(const Stream&, Span<float>, View<float>);
template void floor<float>(const Stream&, Span<float>, View<float>);
template void log<float>(const Stream&, Span<float>, View<float>);
template void rint<float>(const Stream&, Span<float>, View<float>);
template void sqrt<float>(const Stream&, Span<float>, View<float>);
template void not_k<float>(const Stream&, Span<float>, View<float>);
template void acos<float>(const Stream&, Span<float>, View<float>);
template void acosh<float>(const Stream&, Span<float>, View<float>);
template void asin<float>(const Stream&, Span<float>, View<float>);
template void asinh<float>(const Stream&, Span<float>, View<float>);
template void atan<float>(const Stream&, Span<float>, View<float>);
template void atanh<float>(const Stream&, Span<float>, View<float>);
template void cos<float>(const Stream&, Span<float>, View<float>);
template void cosh<float>(const Stream&, Span<float>, View<float>);
template void erf<float>(const Stream&, Span<float>, View<float>);
template void hardswish<float>(const Stream&, Span<float>, View<float>);
template void sin<float>(const Stream&, Span<float>, View<float>);
template void sinh<float>(const Stream&, Span<float>, View<float>);
template void softplus<float>(const Stream&, Span<float>, View<float>);
template void softsign<float>(const Stream&, Span<float>, View<float>);
template void tan<float>(const Stream&, Span<float>, View<float>);
template void celu<float>(const Stream&, Span<float>, View<float>, float);
template void hardsigmoid<float>(const Stream&, Span<float>, View<float>, float, float);
template void selu<float>(const Stream&, Span<float>, View<float>, float, float);
template void thresholdedrelu<float>(const Stream&, Span<float>, View<float>, float);
template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
template void exp<float>(const Stream&, Span<float>, View<float>, float, float);
template void sign<float>(const Stream&, Span<float>, View<float>);
template void shrink<float>(const Stream&, Span<float>, View<float>, float, float);
template void reciprocal<float>(const Stream&, Span<float>, View<float>);
template <class T, std::size_t N> static
void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::axiswise_relu_vec<T, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, inner_size / N, slope);
}
template <class T>
void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
} else {
launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
1932ee2091f1d4a8e93645c870893cc9e841665a.cu
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "functors.hpp"
#include "types.hpp"
#include "vector_traits.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include "../cuda4dnn/kernels/scale_shift.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, class ActivationOp, std::size_t N>
__global__ void generic_op_vec(Span<T> output, View<T> input, const typename ActivationOp::Params params) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
ActivationOp activation_op(params);
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = activation_op(vec.data[j]);
v_store(output_vPtr[i], vec);
}
}
template <class T, std::size_t N>
__global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) {
using vector_type = get_vector_type_t<T, N>;
auto output_vPtr = vector_type::get_pointer(output.data());
auto input_vPtr = vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / vector_type::size())) {
const index_type c = (i / inner_size) % slope.size();
vector_type vec;
v_load(vec, input_vPtr[i]);
for (int j = 0; j < vector_type::size(); j++)
vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c];
v_store(output_vPtr[i], vec);
}
}
} /* namespace raw */
template <class T, class ActivationOp, std::size_t N> static
void launch_vectorized_generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
auto kernel = raw::generic_op_vec<T, ActivationOp, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, params);
}
template <class T, class ActivationOp> static
void generic_op(const Stream& stream, Span<T> output, View<T> input, const typename ActivationOp::Params& params = {}) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) {
launch_vectorized_generic_op<T, ActivationOp, 4>(stream, output, input, params);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) {
launch_vectorized_generic_op<T, ActivationOp, 2>(stream, output, input, params);
} else {
launch_vectorized_generic_op<T, ActivationOp, 1>(stream, output, input, params);
}
}
template <class T>
void relu(const Stream& stream, Span<T> output, View<T> input, T slope) {
generic_op<T, ReLUFunctor<T>>(stream, output, input, {slope});
}
template <class T>
void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) {
CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling));
generic_op<T, ClippedReLUFunctor<T>>(stream, output, input, {floor, ceiling});
}
template <class T>
void tanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanHFunctor<T>>(stream, output, input);
}
template <class T>
void swish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SwishFunctor<T>>(stream, output, input);
}
template <class T>
void mish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, MishFunctor<T>>(stream, output, input);
}
template <class T>
void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SigmoidFunctor<T>>(stream, output, input);
}
template <class T>
void elu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ELUFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void bnll(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, BNLLFunctor<T>>(stream, output, input);
}
template <class T>
void ceil(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CeilFunctor<T>>(stream, output, input);
}
template <class T>
void floor(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, FloorFunctor<T>>(stream, output, input);
}
template <class T>
void log(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, LogFunctor<T>>(stream, output, input);
}
template <class T>
void rint(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, RintFunctor<T>>(stream, output, input);
}
template <class T>
void sqrt(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SqrtFunctor<T>>(stream, output, input);
}
template <class T>
void not_k(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, NotFunctor<T>>(stream, output, input);
}
template <class T>
void acos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcosFunctor<T>>(stream, output, input);
}
template <class T>
void acosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AcoshFunctor<T>>(stream, output, input);
}
template <class T>
void asin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinFunctor<T>>(stream, output, input);
}
template <class T>
void asinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AsinhFunctor<T>>(stream, output, input);
}
template <class T>
void atan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanFunctor<T>>(stream, output, input);
}
template <class T>
void atanh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AtanhFunctor<T>>(stream, output, input);
}
template <class T>
void cos(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CosFunctor<T>>(stream, output, input);
}
template <class T>
void cosh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, CoshFunctor<T>>(stream, output, input);
}
template <class T>
void erf(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, ErfFunctor<T>>(stream, output, input);
}
template <class T>
void hardswish(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, HardSwishFunctor<T>>(stream, output, input);
}
template <class T>
void sin(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinFunctor<T>>(stream, output, input);
}
template <class T>
void sinh(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SinhFunctor<T>>(stream, output, input);
}
template <class T>
void softplus(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftplusFunctor<T>>(stream, output, input);
}
template <class T>
void softsign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SoftsignFunctor<T>>(stream, output, input);
}
template <class T>
void tan(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, TanFunctor<T>>(stream, output, input);
}
template <class T>
void celu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, CeluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void hardsigmoid(const Stream& stream, Span<T> output, View<T> input, T alpha, T beta) {
generic_op<T, HardSigmoidFunctor<T>>(stream, output, input, {alpha, beta});
}
template <class T>
void selu(const Stream& stream, Span<T> output, View<T> input, T alpha, T gamma) {
generic_op<T, SeluFunctor<T>>(stream, output, input, {alpha, gamma});
}
template <class T>
void sign(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SignFunctor<T>>(stream, output, input);
}
template <class T>
void shrink(const Stream& stream, Span<T> output, View<T> input, T bias, T lambd) {
generic_op<T, ShrinkFunctor<T>>(stream, output, input, {bias, lambd});
}
template <class T>
void reciprocal(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, SignFunctor<T>>(stream, output, input);
}
template <class T>
void thresholdedrelu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
generic_op<T, ThresholdedReluFunctor<T>>(stream, output, input, {alpha});
}
template <class T>
void abs(const Stream& stream, Span<T> output, View<T> input) {
generic_op<T, AbsFunctor<T>>(stream, output, input);
}
template <class T>
void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) {
CV_Assert(input.size() == output.size());
if (static_cast<float>(exp) == 1.0f) {
scale1_with_bias1(stream, output, input, scale, shift);
return;
}
generic_op<T, PowerFunctor<T>>(stream, output, input, {exp, scale, shift});
}
template <class T>
void exp(const Stream& stream, Span<T> output, View<T> input, T normScale, T normShift) {
generic_op<T, ExpFunctor<T>>(stream, output, input, {normScale, normShift});
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
template void swish<__half>(const Stream&, Span<__half>, View<__half>);
template void mish<__half>(const Stream&, Span<__half>, View<__half>);
template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
template void elu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
template void ceil<__half>(const Stream&, Span<__half>, View<__half>);
template void floor<__half>(const Stream&, Span<__half>, View<__half>);
template void log<__half>(const Stream&, Span<__half>, View<__half>);
template void rint<__half>(const Stream&, Span<__half>, View<__half>);
template void sqrt<__half>(const Stream&, Span<__half>, View<__half>);
template void not_k<__half>(const Stream&, Span<__half>, View<__half>);
template void acos<__half>(const Stream&, Span<__half>, View<__half>);
template void acosh<__half>(const Stream&, Span<__half>, View<__half>);
template void asin<__half>(const Stream&, Span<__half>, View<__half>);
template void asinh<__half>(const Stream&, Span<__half>, View<__half>);
template void atan<__half>(const Stream&, Span<__half>, View<__half>);
template void atanh<__half>(const Stream&, Span<__half>, View<__half>);
template void cos<__half>(const Stream&, Span<__half>, View<__half>);
template void cosh<__half>(const Stream&, Span<__half>, View<__half>);
template void erf<__half>(const Stream&, Span<__half>, View<__half>);
template void hardswish<__half>(const Stream&, Span<__half>, View<__half>);
template void sin<__half>(const Stream&, Span<__half>, View<__half>);
template void sinh<__half>(const Stream&, Span<__half>, View<__half>);
template void softplus<__half>(const Stream&, Span<__half>, View<__half>);
template void softsign<__half>(const Stream&, Span<__half>, View<__half>);
template void tan<__half>(const Stream&, Span<__half>, View<__half>);
template void celu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void hardsigmoid<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void selu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void thresholdedrelu<__half>(const Stream&, Span<__half>, View<__half>, __half);
template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half);
template void exp<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void sign<__half>(const Stream&, Span<__half>, View<__half>);
template void shrink<__half>(const Stream&, Span<__half>, View<__half>, __half, __half);
template void reciprocal<__half>(const Stream&, Span<__half>, View<__half>);
#endif
template void relu<float>(const Stream&, Span<float>, View<float>, float);
template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float);
template void tanh<float>(const Stream&, Span<float>, View<float>);
template void swish<float>(const Stream&, Span<float>, View<float>);
template void mish<float>(const Stream&, Span<float>, View<float>);
template void sigmoid<float>(const Stream&, Span<float>, View<float>);
template void elu<float>(const Stream&, Span<float>, View<float>, float);
template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
template void bnll<float>(const Stream&, Span<float>, View<float>);
template void ceil<float>(const Stream&, Span<float>, View<float>);
template void floor<float>(const Stream&, Span<float>, View<float>);
template void log<float>(const Stream&, Span<float>, View<float>);
template void rint<float>(const Stream&, Span<float>, View<float>);
template void sqrt<float>(const Stream&, Span<float>, View<float>);
template void not_k<float>(const Stream&, Span<float>, View<float>);
template void acos<float>(const Stream&, Span<float>, View<float>);
template void acosh<float>(const Stream&, Span<float>, View<float>);
template void asin<float>(const Stream&, Span<float>, View<float>);
template void asinh<float>(const Stream&, Span<float>, View<float>);
template void atan<float>(const Stream&, Span<float>, View<float>);
template void atanh<float>(const Stream&, Span<float>, View<float>);
template void cos<float>(const Stream&, Span<float>, View<float>);
template void cosh<float>(const Stream&, Span<float>, View<float>);
template void erf<float>(const Stream&, Span<float>, View<float>);
template void hardswish<float>(const Stream&, Span<float>, View<float>);
template void sin<float>(const Stream&, Span<float>, View<float>);
template void sinh<float>(const Stream&, Span<float>, View<float>);
template void softplus<float>(const Stream&, Span<float>, View<float>);
template void softsign<float>(const Stream&, Span<float>, View<float>);
template void tan<float>(const Stream&, Span<float>, View<float>);
template void celu<float>(const Stream&, Span<float>, View<float>, float);
template void hardsigmoid<float>(const Stream&, Span<float>, View<float>, float, float);
template void selu<float>(const Stream&, Span<float>, View<float>, float, float);
template void thresholdedrelu<float>(const Stream&, Span<float>, View<float>, float);
template void power<float>(const Stream&, Span<float>, View<float>, float, float, float);
template void exp<float>(const Stream&, Span<float>, View<float>, float, float);
template void sign<float>(const Stream&, Span<float>, View<float>);
template void shrink<float>(const Stream&, Span<float>, View<float>, float, float);
template void reciprocal<float>(const Stream&, Span<float>, View<float>);
template <class T, std::size_t N> static
void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(is_fully_aligned<T>(output, N));
CV_Assert(is_fully_aligned<T>(input, N));
CV_Assert(inner_size % N == 0);
auto kernel = raw::axiswise_relu_vec<T, N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input, inner_size / N, slope);
}
template <class T>
void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) {
CV_Assert(input.size() == output.size());
if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) {
launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope);
} else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) {
launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope);
} else {
launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>);
#endif
template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
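generic_op and axiswise_relu in the pair above choose the widest vectorized kernel (4-, 2- or 1-element loads) that the buffers' alignment and size allow. The same dispatch pattern in isolation, with aligned_to and process_width as illustrative stand-ins for is_fully_aligned and the launch_vectorized_* helpers:

#include <cstdint>
#include <cstddef>

template <class T>
static bool aligned_to(const T* ptr, std::size_t count, std::size_t n) {
    // pointer aligned to n*sizeof(T) and element count divisible by n
    return reinterpret_cast<std::uintptr_t>(ptr) % (n * sizeof(T)) == 0 && count % n == 0;
}

template <class T, std::size_t N>
void process_width(T* out, const T* in, std::size_t count) {
    // stand-in for the vectorized body: steps N elements at a time
    for (std::size_t i = 0; i < count; i += N)
        for (std::size_t j = 0; j < N; ++j) out[i + j] = in[i + j];
}

template <class T>
void dispatch_by_alignment(T* out, const T* in, std::size_t count) {
    if (aligned_to(out, count, 4) && aligned_to(in, count, 4))
        process_width<T, 4>(out, in, count);   // widest vector path (e.g. float4)
    else if (aligned_to(out, count, 2) && aligned_to(in, count, 2))
        process_width<T, 2>(out, in, count);
    else
        process_width<T, 1>(out, in, count);   // scalar fallback
}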
a3ac11cd900cda64aa7db0abc6092b3b7fe8ab7b.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.m on 20-Dec-2011 16:24:10
//
// header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
// global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ float alpha;
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
cutilSafeCall(hipMemcpyToSymbol(name, dat, dim*size));
}
// user kernel files
#include "res_kernel.hip"
#include "update_kernel.hip"
|
a3ac11cd900cda64aa7db0abc6092b3b7fe8ab7b.cu
|
//
// auto-generated by op2.m on 20-Dec-2011 16:24:10
//
// header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
// global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ float alpha;
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
cutilSafeCall(cudaMemcpyToSymbol(name, dat, dim*size));
}
// user kernel files
#include "res_kernel.cu"
#include "update_kernel.cu"
|
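op_decl_const_char in the pair above passes the constant's name as a string to (hip/cuda)MemcpyToSymbol, an old CUDA idiom; current toolkits (and HIP) expect the symbol itself, so a ported helper for the single alpha constant would look roughly like the sketch below. This is an assumption about how one would modernize the generated code, not part of it, and set_alpha is an illustrative name:

#include <cuda_runtime.h>

__constant__ float alpha;

// Modern form: pass the symbol itself, not its name as a string
// (string-name symbol lookups were removed from the CUDA runtime API).
void set_alpha(float value) {
    cudaMemcpyToSymbol(alpha, &value, sizeof(float));
}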
ac63652a3debb9b92fd2c355acd002bf2b7f91a9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
/* This Source Code Form is subject to the terms of the Mozilla Public */
/* License, v. 2.0. If a copy of the MPL was not distributed with this */
/* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Copyright (c) 2017 Alex Shovkoplyas VE3NEA */
#include <iostream>
#include <iomanip>
#include <time.h>
#include <math.h>
#include <ctime>
#include <memory>
#include <algorithm>
#include "cuda_code.h"
#include "cuda_code.cuh"
#include "cuda_err_check.h"
#include "plugboard.h"
#include "ngrams.h"
#include "iterator.h"
__constant__ int8_t d_ciphertext[MAX_MESSAGE_LENGTH];
__constant__ Wiring d_wiring;
__constant__ Key d_key;
__constant__ NGRAM_DATA_TYPE d_unigrams[ALPSIZE];
__constant__ NGRAM_DATA_TYPE d_bigrams[ALPSIZE][ALPSIZE];
__constant__ int8_t d_order[ALPSIZE];
__constant__ int8_t d_plugs[ALPSIZE];
__constant__ bool d_fixed[ALPSIZE];
Result * d_temp;
Task h_task;
//------------------------------------------------------------------------------
// scrambler
//------------------------------------------------------------------------------
__host__ __device__
int8_t mod26(const int16_t x)
{
return (ALPSIZE * 2 + x) % ALPSIZE;
}
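// Added annotation: the ALPSIZE * 2 offset keeps the dividend non-negative for
// the small negative inputs produced by rotor arithmetic; with ALPSIZE == 26,
// mod26(-3) == (52 - 3) % 26 == 23. The result is only guaranteed to lie in
// [0, 25] for x >= -52.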
void SetUpScramblerMemory()
{
//wiring to gpu
CUDA_CHECK(hipMemcpyToSymbol(d_wiring, &wiring, sizeof(Wiring)));
//memory for scrambler
CUDA_CHECK(hipMallocPitch(&h_task.scrambler.data, &h_task.scrambler.pitch,
28, ALPSIZE_TO3));
}
__global__
void GenerateScramblerKernel(const Task task)
{
__shared__ const int8_t * reflector;
__shared__ const int8_t * g_rotor;
__shared__ const int8_t * l_rotor;
__shared__ const int8_t * m_rotor;
__shared__ const int8_t * r_rotor;
__shared__ const int8_t * g_rev_rotor;
__shared__ const int8_t * l_rev_rotor;
__shared__ const int8_t * m_rev_rotor;
__shared__ const int8_t * r_rev_rotor;
__shared__ int8_t r_core_position;
__shared__ int8_t m_core_position;
__shared__ int8_t l_core_position;
__shared__ int8_t g_core_position;
__shared__ int8_t * entry;
if (threadIdx.x == 0)
{
//wirings
reflector = d_wiring.reflectors[d_key.stru.ukwnum];
g_rotor = d_wiring.rotors[d_key.stru.g_slot];
l_rotor = d_wiring.rotors[d_key.stru.l_slot];
m_rotor = d_wiring.rotors[d_key.stru.m_slot];
r_rotor = d_wiring.rotors[d_key.stru.r_slot];
g_rev_rotor = d_wiring.reverse_rotors[d_key.stru.g_slot];
l_rev_rotor = d_wiring.reverse_rotors[d_key.stru.l_slot];
m_rev_rotor = d_wiring.reverse_rotors[d_key.stru.m_slot];
r_rev_rotor = d_wiring.reverse_rotors[d_key.stru.r_slot];
//core positions
r_core_position = blockIdx.x;
m_core_position = blockIdx.y;
l_core_position = blockIdx.z;
g_core_position = mod26(d_key.sett.g_mesg - d_key.sett.g_ring);
//address of scrambler entry
entry = task.scrambler.data + task.scrambler.pitch * (
l_core_position * ALPSIZE * ALPSIZE +
m_core_position * ALPSIZE +
r_core_position);
}
__syncthreads();
//scramble one char
int8_t ch_in = threadIdx.x;
int8_t ch_out = ch_in;
ch_out = r_rotor[mod26(ch_out + r_core_position)] - r_core_position;
ch_out = m_rotor[mod26(ch_out + m_core_position)] - m_core_position;
ch_out = l_rotor[mod26(ch_out + l_core_position)] - l_core_position;
if (d_key.stru.model == enigmaM4)
{
ch_out = g_rotor[mod26(ch_out + g_core_position)] - g_core_position;
ch_out = reflector[mod26(ch_out)];
ch_out = g_rev_rotor[mod26(ch_out + g_core_position)] - g_core_position;
}
else
{
ch_out = reflector[mod26(ch_out)];
}
ch_out = l_rev_rotor[mod26(ch_out + l_core_position)] - l_core_position;
ch_out = m_rev_rotor[mod26(ch_out + m_core_position)] - m_core_position;
ch_out = r_rev_rotor[mod26(ch_out + r_core_position)] - r_core_position;
//char to scrambler
entry[ch_in] = mod26(ch_out);
}
void GenerateScrambler(const Key & key)
{
//key to gpu
CUDA_CHECK(hipMemcpyToSymbol(d_key, &key, sizeof(Key)));
//block and grid dimensions
dim3 dimBlock(ALPSIZE);
dim3 dimGrid(ALPSIZE, ALPSIZE, ALPSIZE);
//run kernel
GenerateScramblerKernel << < dimGrid, dimBlock >> > (h_task);
CUDA_CHECK(hipDeviceSynchronize());
}
__host__ __device__
int ComputeScramblerIndex(int char_pos,
const ScramblerStructure & stru,
const RotorSettings & sett, const Wiring & wiring)
{
//retrieve notch info
const int8_t * r_notch = wiring.notch_positions[stru.r_slot];
const int8_t * m_notch = wiring.notch_positions[stru.m_slot];
//period of the rotor turnovers
int m_period = (r_notch[1] == NONE) ? ALPSIZE : HALF_ALPSIZE;
int l_period = (m_notch[1] == NONE) ? ALPSIZE : HALF_ALPSIZE;
l_period = (l_period - 1) * m_period;
//current wheel position relative to the last notch
int r_after_notch = sett.r_mesg - r_notch[0];
if (r_after_notch < 0) r_after_notch += ALPSIZE;
if (r_notch[1] != NONE && r_after_notch >= (r_notch[1] - r_notch[0]))
r_after_notch -= r_notch[1] - r_notch[0];
int m_after_notch = sett.m_mesg - m_notch[0];
if (m_after_notch < 0) m_after_notch += ALPSIZE;
if (m_notch[1] != NONE && m_after_notch >= (m_notch[1] - m_notch[0]))
m_after_notch -= m_notch[1] - m_notch[0];
//middle wheel turnover phase
int m_phase = r_after_notch - 1;
if (m_phase < 0) m_phase += m_period;
//left wheel turnover phase
int l_phase = m_phase - 1 + (m_after_notch - 1) * m_period;
if (l_phase < 0) l_phase += l_period;
//hacks
if (m_after_notch == 0) l_phase += m_period;
if (m_after_notch == 1 && r_after_notch == 1)
l_phase -= l_period; //effectively sets l_phase to -1
if (m_after_notch == 0 && r_after_notch == 0)
{
m_phase -= m_period;
l_phase -= m_period;
if (char_pos == 0) l_phase++;
}
//save debug info
// r_after_notch_display = r_after_notch;
// m_after_notch_display = m_after_notch;
// l_phase_display = l_phase;
//number of turnovers
int m_steps = (m_phase + char_pos + 1) / m_period;
int l_steps = (l_phase + char_pos + 1) / l_period;
//double step of the middle wheel
m_steps += l_steps;
//rotor core positions to scrambling table index
return mod26(sett.l_mesg - sett.l_ring + l_steps) * ALPSIZE_TO2 +
mod26(sett.m_mesg - sett.m_ring + m_steps) * ALPSIZE +
mod26(sett.r_mesg - sett.r_ring + char_pos + 1);
}
__host__ __device__
TurnoverLocation GetTurnoverLocation(const ScramblerStructure & stru,
const RotorSettings sett, int ciphertext_length, const Wiring & wiring)
{
//rotors with two notches
if (stru.r_slot > rotV && sett.r_ring >= HALF_ALPSIZE)
return toAfterMessage;
if (stru.m_slot > rotV && sett.m_ring >= HALF_ALPSIZE)
return toAfterMessage;
//does the left-hand rotor turn just before the message?
int8_t l_core_before = mod26(sett.l_mesg - sett.l_ring);
int8_t l_core_first = ComputeScramblerIndex(0, stru, sett, wiring)
/ ALPSIZE_TO2;
if (l_core_first != l_core_before) return toBeforeMessage;
//does it turn during the message?
int8_t l_core_last =
ComputeScramblerIndex(ciphertext_length-1, stru, sett, wiring)
/ ALPSIZE_TO2;
if (l_core_last != l_core_first) return toDuringMessage;
return toAfterMessage;
}
//move the relevant part of the scrambler from global to shared memory
//and shuffle it to avoid bank conflicts
__device__
const int8_t * ScramblerToShared(const int8_t * global_scrambling_table)
{
//global: ALPSIZE bytes at sequential addresses
const int32_t * src =
reinterpret_cast<const int32_t *>(global_scrambling_table);
//shared: same bytes in groups of 4 at a stride of 128
extern __shared__ int8_t shared_scrambling_table[];
int32_t * dst = reinterpret_cast<int32_t *>(shared_scrambling_table);
//copy the 28-byte padded row (ALPSIZE letters + 2 padding bytes) as 7 x 32-bit words
int idx = (threadIdx.x & ~31) * 7 + (threadIdx.x & 31);
for (int i = 0; i < 7; ++i) dst[idx + 32 * i] = src[i];
return &shared_scrambling_table[idx * 4];
}
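// Added annotation: thread t writes 32-bit word i to
// dst[(t & ~31) * 7 + (t & 31) + 32 * i], so the 32 lanes of a warp touch 32
// distinct shared-memory banks for every i. Byte c of a row is then read at
// offset (c & ~3) * 32 + (c & 3) from the returned pointer, which is exactly
// the index used by Decode() below.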
//------------------------------------------------------------------------------
// constants to device
//------------------------------------------------------------------------------
void CipherTextToDevice(string ciphertext_string)
{
std::vector<int8_t> cipher = TextToNumbers(ciphertext_string);
int8_t * cipher_data = cipher.data();
CUDA_CHECK(hipMemcpyToSymbol(d_ciphertext, cipher_data, cipher.size()));
h_task.count = (int)cipher.size();
}
void PlugboardStringToDevice(string plugboard_string)
{
Plugboard plugboard;
plugboard.FromString(plugboard_string);
PlugboardToDevice(plugboard);
}
void PlugboardToDevice(const Plugboard & plugboard)
{
CUDA_CHECK(hipMemcpyToSymbol(d_plugs, plugboard.plugs, ALPSIZE));
CUDA_CHECK(hipMemcpyToSymbol(d_fixed, plugboard.fixed,
sizeof(bool) * ALPSIZE));
}
void OrderToDevice(const int8_t * order)
{
CUDA_CHECK(hipMemcpyToSymbol(d_order, order, ALPSIZE));
}
void InitializeArrays(const string cipher_string, int turnover_modes,
int score_kinds, int digits)
{
//d_ciphertext
CipherTextToDevice(cipher_string);
//d_wiring
SetUpScramblerMemory();
//allow_turnover
h_task.turnover_modes = turnover_modes;
//use unigrams
h_task.score_kinds = score_kinds;
//d_results
int count = (int)pow(ALPSIZE, digits);
SetUpResultsMemory(count);
}
//------------------------------------------------------------------------------
// score
//------------------------------------------------------------------------------
void NgramsToDevice(const string & uni_filename,
const string & bi_filename, const string & tri_filename)
{
if (uni_filename != "")
{
Unigrams unigrams;
unigrams.LoadFromFile(uni_filename);
CUDA_CHECK(hipMemcpyToSymbol(d_unigrams, unigrams.data, sizeof(d_unigrams)));
}
if (bi_filename != "")
{
Bigrams bigrams;
bigrams.LoadFromFile(bi_filename);
CUDA_CHECK(hipMemcpyToSymbol(d_bigrams, bigrams.data, sizeof(d_bigrams)));
}
if (tri_filename != "")
{
//trigram data
Trigrams trigrams_obj;
trigrams_obj.LoadFromFile(tri_filename);
//non-pitched array in device memory. slightly faster than pitched
CUDA_CHECK(hipMalloc(&h_task.trigrams.data,
sizeof(NGRAM_DATA_TYPE) * ALPSIZE_TO3));
h_task.trigrams.pitch = sizeof(NGRAM_DATA_TYPE) * ALPSIZE;
//data to device
CUDA_CHECK(hipMemcpy(h_task.trigrams.data, trigrams_obj.data,
sizeof(NGRAM_DATA_TYPE) * ALPSIZE_TO3, hipMemcpyHostToDevice));
}
}
__device__
int8_t Decode(const int8_t * plugboard, const int8_t * scrambling_table)
{
int8_t c = d_ciphertext[threadIdx.x];
c = plugboard[c];
c = scrambling_table[(c & ~3) * 32 + (c & 3)];
c = plugboard[c];
return c;
}
//MIN_MESSAGE_LENGTH <= count <= MAX_MESSAGE_LENGTH
__device__
void Sum(int count, volatile int * data, int * sum)
{
if ((threadIdx.x + 128) < count) data[threadIdx.x] += data[128 + threadIdx.x];
__syncthreads();
if (threadIdx.x < 64 && (threadIdx.x + 64) < count)
data[threadIdx.x] += data[64 + threadIdx.x];
__syncthreads();
if (threadIdx.x < 32)
{
if ((threadIdx.x + 32) < count) data[threadIdx.x] += data[32 + threadIdx.x];
if ((threadIdx.x + 16) < count) data[threadIdx.x] += data[16 + threadIdx.x];
data[threadIdx.x] += data[8 + threadIdx.x];
data[threadIdx.x] += data[4 + threadIdx.x];
data[threadIdx.x] += data[2 + threadIdx.x];
if (threadIdx.x == 0) *sum = data[0] + data[1];
}
__syncthreads();
}
#define HISTO_SIZE 32
__device__
void IcScore(Block & block, const int8_t * scrambling_table)
{
//init histogram
if (threadIdx.x < HISTO_SIZE) block.score_buf[threadIdx.x] = 0;
__syncthreads();
//compute histogram
if (threadIdx.x < block.count)
{
int8_t c = Decode(block.plugs, scrambling_table);
atomicAdd((int *)&block.score_buf[c], 1);
}
__syncthreads();
//TODO: try lookup table here, ic[MAX_MESSAGE_LENGTH]
if (threadIdx.x < HISTO_SIZE)
block.score_buf[threadIdx.x] *= block.score_buf[threadIdx.x] - 1;
//sum up
if (threadIdx.x < HISTO_SIZE / 2)
{
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 16];
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 8];
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 4];
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 2];
if (threadIdx.x == 0) block.score = block.score_buf[0] + block.score_buf[1];
}
__syncthreads();
}
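// Added annotation: the resulting block.score is the unnormalised
// index-of-coincidence numerator sum_c n_c * (n_c - 1); dividing by
// count * (count - 1) would give the usual IC value.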
//TODO: put unigram table to shared memory
__device__
void UniScore(Block & block, const int8_t * scrambling_table)
{
if (threadIdx.x < block.count)
{
int8_t c = Decode(block.plugs, scrambling_table);
block.score_buf[threadIdx.x] = block.unigrams[c];
}
__syncthreads();
Sum(block.count, block.score_buf, &block.score);
}
__device__
void BiScore(Block & block, const int8_t * scrambling_table)
{
if (threadIdx.x < block.count)
block.plain_text[threadIdx.x] = Decode(block.plugs, scrambling_table);
__syncthreads();
//TODO: trigrams are faster than bigrams.
//is it because trigrams are not declared as constants?
//or because their index is computed explicitly?
if (threadIdx.x < (block.count - 1))
block.score_buf[threadIdx.x] =
d_bigrams[block.plain_text[threadIdx.x]]
[block.plain_text[threadIdx.x + 1]];
__syncthreads();
Sum(block.count - 1, block.score_buf, &block.score);
}
//TODO: use bit mask in shared memory for non-zero elements
//676 bit flags for the first 2 letters in a trigram
//saves ~ half of the global memory reads
__device__
void TriScore(Block & block, const int8_t * scrambling_table)
{
//decode char
if (threadIdx.x < block.count)
block.plain_text[threadIdx.x] = Decode(block.plugs, scrambling_table);
__syncthreads();
//look up scores
if (threadIdx.x < (block.count - 2))
block.score_buf[threadIdx.x] = block.trigrams[
block.plain_text[threadIdx.x] * ALPSIZE_TO2 +
block.plain_text[threadIdx.x + 1] * ALPSIZE +
block.plain_text[threadIdx.x+2]];
__syncthreads();
Sum(block.count - 2, block.score_buf, &block.score);
}
__device__
void CalculateScore(Block & block, const int8_t * scrambling_table)
{
switch (block.score_kind)
{
case skTrigram: TriScore(block, scrambling_table); break;
case skBigram: BiScore(block, scrambling_table); break;
case skUnigram: UniScore(block, scrambling_table); break;
case skIC: IcScore(block, scrambling_table); break;
}
}
//------------------------------------------------------------------------------
// climber
//------------------------------------------------------------------------------
__device__ void TrySwap(int8_t i, int8_t k,
const int8_t * scrambling_table, Block & block)
{
__shared__ int old_score;
int8_t x, z;
old_score = block.score;
if (d_fixed[i] || d_fixed[k]) return;
if (threadIdx.x == 0)
{
x = block.plugs[i];
z = block.plugs[k];
if (x == k)
{
block.plugs[i] = i;
block.plugs[k] = k;
}
else
{
if (x != i)
{
block.plugs[i] = i;
block.plugs[x] = x;
};
if (z != k)
{
block.plugs[k] = k;
block.plugs[z] = z;
};
block.plugs[i] = k;
block.plugs[k] = i;
}
}
__syncthreads();
CalculateScore(block, scrambling_table);
if (threadIdx.x == 0 && block.score <= old_score)
{
block.score = old_score;
block.plugs[z] = k;
block.plugs[x] = i;
block.plugs[k] = z;
block.plugs[i] = x;
}
__syncthreads();
}
__device__ void MaximizeScore(Block & block, const int8_t * scrambling_table)
{
CalculateScore(block, scrambling_table);
for (int p = 0; p < ALPSIZE - 1; p++)
for (int q = p + 1; q < ALPSIZE; q++)
TrySwap(d_order[p], d_order[q], scrambling_table, block);
}
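// Added annotation: MaximizeScore is one hill-climbing sweep over all
// ALPSIZE * (ALPSIZE - 1) / 2 = 325 plug pairs (for ALPSIZE == 26) in the
// order given by d_order; TrySwap keeps a swap only if it strictly increases
// the score and reverts it otherwise.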
__global__ void ClimbKernel(const Task task)
{
__shared__ Block block;
__shared__ RotorSettings sett;
__shared__ bool skip_this_key;
__shared__ Result * result;
if (threadIdx.x < ALPSIZE)
{
block.plugs[threadIdx.x] = d_plugs[threadIdx.x];
block.unigrams[threadIdx.x] = d_unigrams[threadIdx.x];
}
if (threadIdx.x == 0)
{
block.trigrams = reinterpret_cast<int*>(task.trigrams.data);
block.count = task.count;
//ring and rotor settings to be tried
sett.g_ring = 0;
sett.l_ring = 0;
//depending on the grid size, ring positions
//either from grid index or fixed (from d_key)
sett.m_ring = (gridDim.y > ALPSIZE) ? blockIdx.y / ALPSIZE : d_key.sett.m_ring;
sett.r_ring = (gridDim.y > 1) ? blockIdx.y % ALPSIZE : d_key.sett.r_ring;
sett.g_mesg = d_key.sett.g_mesg;
sett.l_mesg = (gridDim.x > ALPSIZE_TO2) ? blockIdx.x / ALPSIZE_TO2 : d_key.sett.l_mesg;
sett.m_mesg = (gridDim.x > ALPSIZE) ? (blockIdx.x / ALPSIZE) % ALPSIZE : d_key.sett.m_mesg;
sett.r_mesg = (gridDim.x > 1) ? blockIdx.x % ALPSIZE : d_key.sett.r_mesg;
//element of results[] to store the output
int linear_idx = blockIdx.y * gridDim.x + blockIdx.x;
result = &task.results[linear_idx];
result->index = linear_idx;
result->score = 0;
skip_this_key = ((gridDim.x > 1) &&
(GetTurnoverLocation(d_key.stru, sett, block.count, d_wiring)
& task.turnover_modes) == 0);
}
__syncthreads();
if (skip_this_key) return;
const int8_t * scrambling_table;
if (threadIdx.x < block.count)
{
scrambling_table = task.scrambler.data +
ComputeScramblerIndex(threadIdx.x, d_key.stru, sett, d_wiring) *
task.scrambler.pitch;
scrambling_table = ScramblerToShared(scrambling_table);
}
//IC once
if (task.score_kinds & skIC)
{
block.score_kind = skIC;
MaximizeScore(block, scrambling_table);
}
//unigrams once
if (task.score_kinds & skUnigram)
{
block.score_kind = skUnigram;
MaximizeScore(block, scrambling_table);
}
//bigrams once
if (task.score_kinds & skBigram)
{
block.score_kind = skBigram;
MaximizeScore(block, scrambling_table);
}
//trigrams until convergence
if (task.score_kinds & skTrigram)
{
block.score_kind = skTrigram;
block.score = 0;
int old_score;
do
{
old_score = block.score;
MaximizeScore(block, scrambling_table);
}
while (block.score > old_score);
}
//copy plugboard solution to global results array;
if (threadIdx.x < ALPSIZE) result->plugs[threadIdx.x] = block.plugs[threadIdx.x];
if (threadIdx.x == 0) result->score = block.score;
}
Result Climb(int cipher_length, const Key & key, bool single_key)
{
try
{
CUDA_CHECK(hipMemcpyToSymbol(d_key, &key, sizeof(Key)));
int grid_size = single_key ? 1 : ALPSIZE_TO3;
int block_size = ::max(32, cipher_length);
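// Added annotation: the dynamic shared-memory size below rounds the message
// length up to a whole number of warps and reserves 28 bytes per character,
// matching the 7 x 32-bit interleaved row layout built by ScramblerToShared.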
int shared_scrambler_size = ((cipher_length + 31) & ~31) * 28;
ClimbKernel << <grid_size, block_size, shared_scrambler_size >> > (h_task);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
return GetBestResult(ALPSIZE_TO3);
}
catch (const std::runtime_error & e)
{
std::cout << e.what() << std::endl;
hipGetLastError();
hipDeviceReset();
hipSetDevice(0);
throw e;
}
}
//------------------------------------------------------------------------------
// results
//------------------------------------------------------------------------------
#define REDUCE_MAX_THREADS 256
void SetUpResultsMemory(int count)
{
CUDA_CHECK(hipMalloc((void**)&h_task.results, count * sizeof(Result)));
}
__device__ void SelectHigherScore(Result & a, const Result & b)
{
if (b.score > a.score) a = b;
}
__global__ void FindBestResultKernel(Result *g_idata, Result *g_odata,
unsigned int count)
{
__shared__ Result sdata[REDUCE_MAX_THREADS];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + tid;
if (i < count) sdata[tid] = g_idata[i];
else sdata[tid].score = 0;
if (i + blockDim.x < count) SelectHigherScore(sdata[tid], g_idata[i + blockDim.x]);
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s) SelectHigherScore(sdata[tid], sdata[tid + s]);
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
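// Added annotation: each block loads up to 2 * blockDim.x candidate results,
// keeps the higher score of each pair, and finishes with a shared-memory tree
// reduction, leaving one best result per block in g_odata; GetBestResult below
// repeats this until a single result remains.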
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
void ComputeDimensions(int count, int & grid_size, int & block_size)
{
block_size = (count < REDUCE_MAX_THREADS * 2) ? nextPow2((count + 1) / 2) : REDUCE_MAX_THREADS;
grid_size = (count + (block_size * 2 - 1)) / (block_size * 2);
}
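// Added annotation (worked example): assuming ALPSIZE_TO3 == 26^3 == 17576,
// this yields block_size == REDUCE_MAX_THREADS == 256 and
// grid_size == (17576 + 511) / 512 == 35, i.e. each block reduces up to 512
// candidate results.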
Result GetBestResult(int count)
{
int grid_size, block_size;
ComputeDimensions(count, grid_size, block_size);
if (d_temp == NULL)
CUDA_CHECK(hipMalloc((void **)&d_temp, grid_size * sizeof(Result)));
FindBestResultKernel << < grid_size, block_size >> >
(h_task.results, d_temp, count);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
int s = grid_size;
while (s > 1)
{
CUDA_CHECK(hipMemcpy(h_task.results, d_temp, s * sizeof(Result),
hipMemcpyDeviceToDevice));
ComputeDimensions(s, grid_size, block_size);
FindBestResultKernel << < grid_size, block_size >> >
(h_task.results, d_temp, s);
CUDA_CHECK(hipGetLastError());
s = (s + (block_size * 2 - 1)) / (block_size * 2);
}
Result result;
CUDA_CHECK(hipMemcpy(&result, d_temp, sizeof(Result), hipMemcpyDeviceToHost));
return result;
}
//------------------------------------------------------------------------------
// util
//------------------------------------------------------------------------------
bool SelectGpuDevice(int req_major, int req_minor, int settings_device, bool silent)
{
int best_device = 0;
int num_devices;
hipDeviceProp_t prop;
CUDA_CHECK(hipGetDeviceCount(&num_devices));
if (num_devices==0)
{
std::cerr << "GPU not found. Terminating." << std::endl;
return false;
}
const char* cudadevStr = ::getenv("CUDADEV");
if (settings_device != -1)
best_device = settings_device;
else if (cudadevStr != nullptr)
best_device = atoi(cudadevStr);
else if (num_devices > 1)
{
int max_sm = 0;
for (int device = 0; device < num_devices; device++)
{
CUDA_CHECK(hipGetDeviceProperties(&prop, device));
if (prop.multiProcessorCount > max_sm)
{
max_sm = prop.multiProcessorCount;
best_device = device;
}
}
}
if (best_device < 0 || best_device >= num_devices)
{
std::cerr << "Choosen device out of range" << std::endl;
return false;
}
CUDA_CHECK(hipGetDeviceProperties(&prop, best_device));
if (!silent)
{
std::cout << "Found GPU '" << prop.name << "' with compute capability ";
std::cout << prop.major << "." << prop.minor << "." << std::endl;
}
if (prop.major < req_major || (prop.major == req_major && prop.minor < req_minor))
{
std::cerr << "Program requires GPU with compute capability ";
std::cerr << req_major << "." << req_minor;
std::cerr << " or higher." << std::endl << "Terminating.";
return false;
}
CUDA_CHECK(hipSetDevice(best_device));
return true;
}
int8_t DecodeLetter(int8_t c, const Key & key, const int8_t * plugs)
{
int8_t r = mod26(key.sett.r_mesg - key.sett.r_ring);
int8_t m = mod26(key.sett.m_mesg - key.sett.m_ring);
int8_t l = mod26(key.sett.l_mesg - key.sett.l_ring);
int8_t g = mod26(key.sett.g_mesg - key.sett.g_ring);
c = plugs[c];
c = wiring.rotors[key.stru.r_slot][mod26(c + r)] - r;
c = wiring.rotors[key.stru.m_slot][mod26(c + m)] - m;
c = wiring.rotors[key.stru.l_slot][mod26(c + l)] - l;
c = wiring.rotors[key.stru.g_slot][mod26(c + g)] - g;
c = wiring.reflectors[key.stru.ukwnum][mod26(c)];
c = wiring.reverse_rotors[key.stru.g_slot][mod26(c + g)] - g;
c = wiring.reverse_rotors[key.stru.l_slot][mod26(c + l)] - l;
c = wiring.reverse_rotors[key.stru.m_slot][mod26(c + m)] - m;
c = wiring.reverse_rotors[key.stru.r_slot][mod26(c + r)] - r;
return plugs[mod26(c)];
}
string DecodeMessage(const string & ciphertext, const string & key_string,
const int8_t * plugs)
{
Key key;
key.FromString(key_string);
string result = ciphertext;
for (int i = 0; i < result.length(); i++)
{
key.Step();
result[i] = ToChar(DecodeLetter(ToNum(result[i]), key, plugs));
}
return LowerCase(result);
}
string DecodeMessage(const string & ciphertext, const string & key_string,
const string & plugboard_string)
{
Plugboard plugboard;
plugboard.FromString(plugboard_string);
return DecodeMessage(ciphertext, key_string, plugboard.plugs);
}
void CleanUpGPU()
{ } // nothing to clean up in the CUDA build
|
ac63652a3debb9b92fd2c355acd002bf2b7f91a9.cu
|
#pragma once
/* This Source Code Form is subject to the terms of the Mozilla Public */
/* License, v. 2.0. If a copy of the MPL was not distributed with this */
/* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Copyright (c) 2017 Alex Shovkoplyas VE3NEA */
#include <iostream>
#include <iomanip>
#include <time.h>
#include <math.h>
#include <ctime>
#include <memory>
#include <algorithm>
#include "cuda_code.h"
#include "cuda_code.cuh"
#include "cuda_err_check.h"
#include "plugboard.h"
#include "ngrams.h"
#include "iterator.h"
__constant__ int8_t d_ciphertext[MAX_MESSAGE_LENGTH];
__constant__ Wiring d_wiring;
__constant__ Key d_key;
__constant__ NGRAM_DATA_TYPE d_unigrams[ALPSIZE];
__constant__ NGRAM_DATA_TYPE d_bigrams[ALPSIZE][ALPSIZE];
__constant__ int8_t d_order[ALPSIZE];
__constant__ int8_t d_plugs[ALPSIZE];
__constant__ bool d_fixed[ALPSIZE];
Result * d_temp;
Task h_task;
//------------------------------------------------------------------------------
// scrambler
//------------------------------------------------------------------------------
__host__ __device__
int8_t mod26(const int16_t x)
{
return (ALPSIZE * 2 + x) % ALPSIZE;
}
void SetUpScramblerMemory()
{
//wiring to gpu
CUDA_CHECK(cudaMemcpyToSymbol(d_wiring, &wiring, sizeof(Wiring)));
//memory for scrambler
CUDA_CHECK(cudaMallocPitch(&h_task.scrambler.data, &h_task.scrambler.pitch,
28, ALPSIZE_TO3));
}
__global__
void GenerateScramblerKernel(const Task task)
{
__shared__ const int8_t * reflector;
__shared__ const int8_t * g_rotor;
__shared__ const int8_t * l_rotor;
__shared__ const int8_t * m_rotor;
__shared__ const int8_t * r_rotor;
__shared__ const int8_t * g_rev_rotor;
__shared__ const int8_t * l_rev_rotor;
__shared__ const int8_t * m_rev_rotor;
__shared__ const int8_t * r_rev_rotor;
__shared__ int8_t r_core_position;
__shared__ int8_t m_core_position;
__shared__ int8_t l_core_position;
__shared__ int8_t g_core_position;
__shared__ int8_t * entry;
if (threadIdx.x == 0)
{
//wirings
reflector = d_wiring.reflectors[d_key.stru.ukwnum];
g_rotor = d_wiring.rotors[d_key.stru.g_slot];
l_rotor = d_wiring.rotors[d_key.stru.l_slot];
m_rotor = d_wiring.rotors[d_key.stru.m_slot];
r_rotor = d_wiring.rotors[d_key.stru.r_slot];
g_rev_rotor = d_wiring.reverse_rotors[d_key.stru.g_slot];
l_rev_rotor = d_wiring.reverse_rotors[d_key.stru.l_slot];
m_rev_rotor = d_wiring.reverse_rotors[d_key.stru.m_slot];
r_rev_rotor = d_wiring.reverse_rotors[d_key.stru.r_slot];
//core positions
r_core_position = blockIdx.x;
m_core_position = blockIdx.y;
l_core_position = blockIdx.z;
g_core_position = mod26(d_key.sett.g_mesg - d_key.sett.g_ring);
//address of scrambler entry
entry = task.scrambler.data + task.scrambler.pitch * (
l_core_position * ALPSIZE * ALPSIZE +
m_core_position * ALPSIZE +
r_core_position);
}
__syncthreads();
//scramble one char
int8_t ch_in = threadIdx.x;
int8_t ch_out = ch_in;
ch_out = r_rotor[mod26(ch_out + r_core_position)] - r_core_position;
ch_out = m_rotor[mod26(ch_out + m_core_position)] - m_core_position;
ch_out = l_rotor[mod26(ch_out + l_core_position)] - l_core_position;
if (d_key.stru.model == enigmaM4)
{
ch_out = g_rotor[mod26(ch_out + g_core_position)] - g_core_position;
ch_out = reflector[mod26(ch_out)];
ch_out = g_rev_rotor[mod26(ch_out + g_core_position)] - g_core_position;
}
else
{
ch_out = reflector[mod26(ch_out)];
}
ch_out = l_rev_rotor[mod26(ch_out + l_core_position)] - l_core_position;
ch_out = m_rev_rotor[mod26(ch_out + m_core_position)] - m_core_position;
ch_out = r_rev_rotor[mod26(ch_out + r_core_position)] - r_core_position;
//char to scrambler
entry[ch_in] = mod26(ch_out);
}
void GenerateScrambler(const Key & key)
{
//key to gpu
CUDA_CHECK(cudaMemcpyToSymbol(d_key, &key, sizeof(Key)));
//block and grid dimensions
dim3 dimBlock(ALPSIZE);
dim3 dimGrid(ALPSIZE, ALPSIZE, ALPSIZE);
//run kernel
GenerateScramblerKernel << < dimGrid, dimBlock >> > (h_task);
CUDA_CHECK(cudaDeviceSynchronize());
}
__host__ __device__
int ComputeScramblerIndex(int char_pos,
const ScramblerStructure & stru,
const RotorSettings & sett, const Wiring & wiring)
{
//retrieve notch info
const int8_t * r_notch = wiring.notch_positions[stru.r_slot];
const int8_t * m_notch = wiring.notch_positions[stru.m_slot];
//period of the rotor turnovers
int m_period = (r_notch[1] == NONE) ? ALPSIZE : HALF_ALPSIZE;
int l_period = (m_notch[1] == NONE) ? ALPSIZE : HALF_ALPSIZE;
l_period = (l_period - 1) * m_period;
//current wheel position relative to the last notch
int r_after_notch = sett.r_mesg - r_notch[0];
if (r_after_notch < 0) r_after_notch += ALPSIZE;
if (r_notch[1] != NONE && r_after_notch >= (r_notch[1] - r_notch[0]))
r_after_notch -= r_notch[1] - r_notch[0];
int m_after_notch = sett.m_mesg - m_notch[0];
if (m_after_notch < 0) m_after_notch += ALPSIZE;
if (m_notch[1] != NONE && m_after_notch >= (m_notch[1] - m_notch[0]))
m_after_notch -= m_notch[1] - m_notch[0];
//middle wheel turnover phase
int m_phase = r_after_notch - 1;
if (m_phase < 0) m_phase += m_period;
//left wheel turnover phase
int l_phase = m_phase - 1 + (m_after_notch - 1) * m_period;
if (l_phase < 0) l_phase += l_period;
//hacks
if (m_after_notch == 0) l_phase += m_period;
if (m_after_notch == 1 && r_after_notch == 1)
l_phase -= l_period; //effectively sets l_phase to -1
if (m_after_notch == 0 && r_after_notch == 0)
{
m_phase -= m_period;
l_phase -= m_period;
if (char_pos == 0) l_phase++;
}
//save debug info
// r_after_notch_display = r_after_notch;
// m_after_notch_display = m_after_notch;
// l_phase_display = l_phase;
//number of turnovers
int m_steps = (m_phase + char_pos + 1) / m_period;
int l_steps = (l_phase + char_pos + 1) / l_period;
//double step of the middle wheel
m_steps += l_steps;
//rotor core positions to scrambling table index
return mod26(sett.l_mesg - sett.l_ring + l_steps) * ALPSIZE_TO2 +
mod26(sett.m_mesg - sett.m_ring + m_steps) * ALPSIZE +
mod26(sett.r_mesg - sett.r_ring + char_pos + 1);
}
__host__ __device__
TurnoverLocation GetTurnoverLocation(const ScramblerStructure & stru,
const RotorSettings sett, int ciphertext_length, const Wiring & wiring)
{
//rotors with two notches
if (stru.r_slot > rotV && sett.r_ring >= HALF_ALPSIZE)
return toAfterMessage;
if (stru.m_slot > rotV && sett.m_ring >= HALF_ALPSIZE)
return toAfterMessage;
//does the left-hand rotor turn just before the message?
int8_t l_core_before = mod26(sett.l_mesg - sett.l_ring);
int8_t l_core_first = ComputeScramblerIndex(0, stru, sett, wiring)
/ ALPSIZE_TO2;
if (l_core_first != l_core_before) return toBeforeMessage;
//does it turn during the message?
int8_t l_core_last =
ComputeScramblerIndex(ciphertext_length-1, stru, sett, wiring)
/ ALPSIZE_TO2;
if (l_core_last != l_core_first) return toDuringMessage;
return toAfterMessage;
}
//move the relevant part of the scrambler from global to shared memory
//and shuffle it to avoid bank conflicts
__device__
const int8_t * ScramblerToShared(const int8_t * global_scrambling_table)
{
//global: ALPSIZE bytes at sequential addresses
const int32_t * src =
reinterpret_cast<const int32_t *>(global_scrambling_table);
//shared: same bytes in groups of 4 at a stride of 128
extern __shared__ int8_t shared_scrambling_table[];
int32_t * dst = reinterpret_cast<int32_t *>(shared_scrambling_table);
//copy the 28-byte padded row (ALPSIZE letters + 2 padding bytes) as 7 x 32-bit words
int idx = (threadIdx.x & ~31) * 7 + (threadIdx.x & 31);
for (int i = 0; i < 7; ++i) dst[idx + 32 * i] = src[i];
return &shared_scrambling_table[idx * 4];
}
//------------------------------------------------------------------------------
// constants to device
//------------------------------------------------------------------------------
void CipherTextToDevice(string ciphertext_string)
{
std::vector<int8_t> cipher = TextToNumbers(ciphertext_string);
int8_t * cipher_data = cipher.data();
CUDA_CHECK(cudaMemcpyToSymbol(d_ciphertext, cipher_data, cipher.size()));
h_task.count = (int)cipher.size();
}
void PlugboardStringToDevice(string plugboard_string)
{
Plugboard plugboard;
plugboard.FromString(plugboard_string);
PlugboardToDevice(plugboard);
}
void PlugboardToDevice(const Plugboard & plugboard)
{
CUDA_CHECK(cudaMemcpyToSymbol(d_plugs, plugboard.plugs, ALPSIZE));
CUDA_CHECK(cudaMemcpyToSymbol(d_fixed, plugboard.fixed,
sizeof(bool) * ALPSIZE));
}
void OrderToDevice(const int8_t * order)
{
CUDA_CHECK(cudaMemcpyToSymbol(d_order, order, ALPSIZE));
}
void InitializeArrays(const string cipher_string, int turnover_modes,
int score_kinds, int digits)
{
//d_ciphertext
CipherTextToDevice(cipher_string);
//d_wiring
SetUpScramblerMemory();
//allow_turnover
h_task.turnover_modes = turnover_modes;
//use unigrams
h_task.score_kinds = score_kinds;
//d_results
int count = (int)pow(ALPSIZE, digits);
SetUpResultsMemory(count);
}
//------------------------------------------------------------------------------
// score
//------------------------------------------------------------------------------
void NgramsToDevice(const string & uni_filename,
const string & bi_filename, const string & tri_filename)
{
if (uni_filename != "")
{
Unigrams unigrams;
unigrams.LoadFromFile(uni_filename);
CUDA_CHECK(cudaMemcpyToSymbol(d_unigrams, unigrams.data, sizeof(d_unigrams)));
}
if (bi_filename != "")
{
Bigrams bigrams;
bigrams.LoadFromFile(bi_filename);
CUDA_CHECK(cudaMemcpyToSymbol(d_bigrams, bigrams.data, sizeof(d_bigrams)));
}
if (tri_filename != "")
{
//trigram data
Trigrams trigrams_obj;
trigrams_obj.LoadFromFile(tri_filename);
//non-pitched array in device memory. slightly faster than pitched
CUDA_CHECK(cudaMalloc(&h_task.trigrams.data,
sizeof(NGRAM_DATA_TYPE) * ALPSIZE_TO3));
h_task.trigrams.pitch = sizeof(NGRAM_DATA_TYPE) * ALPSIZE;
//data to device
CUDA_CHECK(cudaMemcpy(h_task.trigrams.data, trigrams_obj.data,
sizeof(NGRAM_DATA_TYPE) * ALPSIZE_TO3, cudaMemcpyHostToDevice));
}
}
__device__
int8_t Decode(const int8_t * plugboard, const int8_t * scrambling_table)
{
int8_t c = d_ciphertext[threadIdx.x];
c = plugboard[c];
c = scrambling_table[(c & ~3) * 32 + (c & 3)];
c = plugboard[c];
return c;
}
//MIN_MESSAGE_LENGTH <= count <= MAX_MESSAGE_LENGTH
__device__
void Sum(int count, volatile int * data, int * sum)
{
if ((threadIdx.x + 128) < count) data[threadIdx.x] += data[128 + threadIdx.x];
__syncthreads();
if (threadIdx.x < 64 && (threadIdx.x + 64) < count)
data[threadIdx.x] += data[64 + threadIdx.x];
__syncthreads();
if (threadIdx.x < 32)
{
if ((threadIdx.x + 32) < count) data[threadIdx.x] += data[32 + threadIdx.x];
if ((threadIdx.x + 16) < count) data[threadIdx.x] += data[16 + threadIdx.x];
data[threadIdx.x] += data[8 + threadIdx.x];
data[threadIdx.x] += data[4 + threadIdx.x];
data[threadIdx.x] += data[2 + threadIdx.x];
if (threadIdx.x == 0) *sum = data[0] + data[1];
}
__syncthreads();
}
#define HISTO_SIZE 32
__device__
void IcScore(Block & block, const int8_t * scrambling_table)
{
//init histogram
if (threadIdx.x < HISTO_SIZE) block.score_buf[threadIdx.x] = 0;
__syncthreads();
//compute histogram
if (threadIdx.x < block.count)
{
int8_t c = Decode(block.plugs, scrambling_table);
atomicAdd((int *)&block.score_buf[c], 1);
}
__syncthreads();
//TODO: try lookup table here, ic[MAX_MESSAGE_LENGTH]
if (threadIdx.x < HISTO_SIZE)
block.score_buf[threadIdx.x] *= block.score_buf[threadIdx.x] - 1;
//sum up
if (threadIdx.x < HISTO_SIZE / 2)
{
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 16];
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 8];
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 4];
block.score_buf[threadIdx.x] += block.score_buf[threadIdx.x + 2];
if (threadIdx.x == 0) block.score = block.score_buf[0] + block.score_buf[1];
}
__syncthreads();
}
//TODO: put unigram table to shared memory
__device__
void UniScore(Block & block, const int8_t * scrambling_table)
{
if (threadIdx.x < block.count)
{
int8_t c = Decode(block.plugs, scrambling_table);
block.score_buf[threadIdx.x] = block.unigrams[c];
}
__syncthreads();
Sum(block.count, block.score_buf, &block.score);
}
__device__
void BiScore(Block & block, const int8_t * scrambling_table)
{
if (threadIdx.x < block.count)
block.plain_text[threadIdx.x] = Decode(block.plugs, scrambling_table);
__syncthreads();
//TODO: trigrams are faster than bigrams.
//is it because trigrams are not declared as constants?
//or because their index is computed explicitly?
if (threadIdx.x < (block.count - 1))
block.score_buf[threadIdx.x] =
d_bigrams[block.plain_text[threadIdx.x]]
[block.plain_text[threadIdx.x + 1]];
__syncthreads();
Sum(block.count - 1, block.score_buf, &block.score);
}
//TODO: use bit mask in shared memory for non-zero elements
//676 bit flags for the first 2 letters in a trigram
//saves ~ half of the global memory reads
__device__
void TriScore(Block & block, const int8_t * scrambling_table)
{
//decode char
if (threadIdx.x < block.count)
block.plain_text[threadIdx.x] = Decode(block.plugs, scrambling_table);
__syncthreads();
//look up scores
if (threadIdx.x < (block.count - 2))
block.score_buf[threadIdx.x] = block.trigrams[
block.plain_text[threadIdx.x] * ALPSIZE_TO2 +
block.plain_text[threadIdx.x + 1] * ALPSIZE +
block.plain_text[threadIdx.x+2]];
__syncthreads();
Sum(block.count - 2, block.score_buf, &block.score);
}
__device__
void CalculateScore(Block & block, const int8_t * scrambling_table)
{
switch (block.score_kind)
{
case skTrigram: TriScore(block, scrambling_table); break;
case skBigram: BiScore(block, scrambling_table); break;
case skUnigram: UniScore(block, scrambling_table); break;
case skIC: IcScore(block, scrambling_table); break;
}
}
//------------------------------------------------------------------------------
// climber
//------------------------------------------------------------------------------
__device__ void TrySwap(int8_t i, int8_t k,
const int8_t * scrambling_table, Block & block)
{
__shared__ int old_score;
int8_t x, z;
old_score = block.score;
if (d_fixed[i] || d_fixed[k]) return;
if (threadIdx.x == 0)
{
x = block.plugs[i];
z = block.plugs[k];
if (x == k)
{
block.plugs[i] = i;
block.plugs[k] = k;
}
else
{
if (x != i)
{
block.plugs[i] = i;
block.plugs[x] = x;
};
if (z != k)
{
block.plugs[k] = k;
block.plugs[z] = z;
};
block.plugs[i] = k;
block.plugs[k] = i;
}
}
__syncthreads();
CalculateScore(block, scrambling_table);
if (threadIdx.x == 0 && block.score <= old_score)
{
block.score = old_score;
block.plugs[z] = k;
block.plugs[x] = i;
block.plugs[k] = z;
block.plugs[i] = x;
}
__syncthreads();
}
__device__ void MaximizeScore(Block & block, const int8_t * scrambling_table)
{
CalculateScore(block, scrambling_table);
for (int p = 0; p < ALPSIZE - 1; p++)
for (int q = p + 1; q < ALPSIZE; q++)
TrySwap(d_order[p], d_order[q], scrambling_table, block);
}
__global__ void ClimbKernel(const Task task)
{
__shared__ Block block;
__shared__ RotorSettings sett;
__shared__ bool skip_this_key;
__shared__ Result * result;
if (threadIdx.x < ALPSIZE)
{
block.plugs[threadIdx.x] = d_plugs[threadIdx.x];
block.unigrams[threadIdx.x] = d_unigrams[threadIdx.x];
}
if (threadIdx.x == 0)
{
block.trigrams = reinterpret_cast<int*>(task.trigrams.data);
block.count = task.count;
//ring and rotor settings to be tried
sett.g_ring = 0;
sett.l_ring = 0;
//depending on the grid size, ring positions
//either from grid index or fixed (from d_key)
sett.m_ring = (gridDim.y > ALPSIZE) ? blockIdx.y / ALPSIZE : d_key.sett.m_ring;
sett.r_ring = (gridDim.y > 1) ? blockIdx.y % ALPSIZE : d_key.sett.r_ring;
sett.g_mesg = d_key.sett.g_mesg;
sett.l_mesg = (gridDim.x > ALPSIZE_TO2) ? blockIdx.x / ALPSIZE_TO2 : d_key.sett.l_mesg;
sett.m_mesg = (gridDim.x > ALPSIZE) ? (blockIdx.x / ALPSIZE) % ALPSIZE : d_key.sett.m_mesg;
sett.r_mesg = (gridDim.x > 1) ? blockIdx.x % ALPSIZE : d_key.sett.r_mesg;
//element of results[] to store the output
int linear_idx = blockIdx.y * gridDim.x + blockIdx.x;
result = &task.results[linear_idx];
result->index = linear_idx;
result->score = 0;
skip_this_key = ((gridDim.x > 1) &&
(GetTurnoverLocation(d_key.stru, sett, block.count, d_wiring)
& task.turnover_modes) == 0);
}
__syncthreads();
if (skip_this_key) return;
const int8_t * scrambling_table;
if (threadIdx.x < block.count)
{
scrambling_table = task.scrambler.data +
ComputeScramblerIndex(threadIdx.x, d_key.stru, sett, d_wiring) *
task.scrambler.pitch;
scrambling_table = ScramblerToShared(scrambling_table);
}
//IC once
if (task.score_kinds & skIC)
{
block.score_kind = skIC;
MaximizeScore(block, scrambling_table);
}
//unigrams once
if (task.score_kinds & skUnigram)
{
block.score_kind = skUnigram;
MaximizeScore(block, scrambling_table);
}
//bigrams once
if (task.score_kinds & skBigram)
{
block.score_kind = skBigram;
MaximizeScore(block, scrambling_table);
}
//trigrams until convergence
if (task.score_kinds & skTrigram)
{
block.score_kind = skTrigram;
block.score = 0;
int old_score;
do
{
old_score = block.score;
MaximizeScore(block, scrambling_table);
}
while (block.score > old_score);
}
//copy plugboard solution to global results array;
if (threadIdx.x < ALPSIZE) result->plugs[threadIdx.x] = block.plugs[threadIdx.x];
if (threadIdx.x == 0) result->score = block.score;
}
Result Climb(int cipher_length, const Key & key, bool single_key)
{
try
{
CUDA_CHECK(cudaMemcpyToSymbol(d_key, &key, sizeof(Key)));
int grid_size = single_key ? 1 : ALPSIZE_TO3;
int block_size = std::max(32, cipher_length);
int shared_scrambler_size = ((cipher_length + 31) & ~31) * 28;
ClimbKernel << <grid_size, block_size, shared_scrambler_size >> > (h_task);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
return GetBestResult(ALPSIZE_TO3);
}
catch (const std::runtime_error & e)
{
std::cout << e.what() << std::endl;
cudaGetLastError();
cudaDeviceReset();
cudaSetDevice(0);
throw e;
}
}
//------------------------------------------------------------------------------
// results
//------------------------------------------------------------------------------
#define REDUCE_MAX_THREADS 256
void SetUpResultsMemory(int count)
{
CUDA_CHECK(cudaMalloc((void**)&h_task.results, count * sizeof(Result)));
}
__device__ void SelectHigherScore(Result & a, const Result & b)
{
if (b.score > a.score) a = b;
}
__global__ void FindBestResultKernel(Result *g_idata, Result *g_odata,
unsigned int count)
{
__shared__ Result sdata[REDUCE_MAX_THREADS];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockDim.x * 2) + tid;
if (i < count) sdata[tid] = g_idata[i];
else sdata[tid].score = 0;
if (i + blockDim.x < count) SelectHigherScore(sdata[tid], g_idata[i + blockDim.x]);
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s) SelectHigherScore(sdata[tid], sdata[tid + s]);
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
void ComputeDimensions(int count, int & grid_size, int & block_size)
{
block_size = (count < REDUCE_MAX_THREADS * 2) ? nextPow2((count + 1) / 2) : REDUCE_MAX_THREADS;
grid_size = (count + (block_size * 2 - 1)) / (block_size * 2);
}
Result GetBestResult(int count)
{
int grid_size, block_size;
ComputeDimensions(count, grid_size, block_size);
if (d_temp == NULL)
CUDA_CHECK(cudaMalloc((void **)&d_temp, grid_size * sizeof(Result)));
FindBestResultKernel << < grid_size, block_size >> >
(h_task.results, d_temp, count);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
int s = grid_size;
while (s > 1)
{
CUDA_CHECK(cudaMemcpy(h_task.results, d_temp, s * sizeof(Result),
cudaMemcpyDeviceToDevice));
ComputeDimensions(s, grid_size, block_size);
FindBestResultKernel << < grid_size, block_size >> >
(h_task.results, d_temp, s);
CUDA_CHECK(cudaGetLastError());
s = (s + (block_size * 2 - 1)) / (block_size * 2);
}
Result result;
CUDA_CHECK(cudaMemcpy(&result, d_temp, sizeof(Result), cudaMemcpyDeviceToHost));
return result;
}
//------------------------------------------------------------------------------
// util
//------------------------------------------------------------------------------
bool SelectGpuDevice(int req_major, int req_minor, int settings_device, bool silent)
{
int best_device = 0;
int num_devices;
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceCount(&num_devices));
if (num_devices==0)
{
std::cerr << "GPU not found. Terminating." << std::endl;
return false;
}
const char* cudadevStr = ::getenv("CUDADEV");
if (settings_device != -1)
best_device = settings_device;
else if (cudadevStr != nullptr)
best_device = atoi(cudadevStr);
else if (num_devices > 1)
{
int max_sm = 0;
for (int device = 0; device < num_devices; device++)
{
CUDA_CHECK(cudaGetDeviceProperties(&prop, device));
if (prop.multiProcessorCount > max_sm)
{
max_sm = prop.multiProcessorCount;
best_device = device;
}
}
}
if (best_device < 0 || best_device >= num_devices)
{
std::cerr << "Choosen device out of range" << std::endl;
return false;
}
CUDA_CHECK(cudaGetDeviceProperties(&prop, best_device));
if (!silent)
{
std::cout << "Found GPU '" << prop.name << "' with compute capability ";
std::cout << prop.major << "." << prop.minor << "." << std::endl;
}
if (prop.major < req_major || (prop.major == req_major && prop.minor < req_minor))
{
std::cerr << "Program requires GPU with compute capability ";
std::cerr << req_major << "." << req_minor;
std::cerr << " or higher." << std::endl << "Terminating.";
return false;
}
CUDA_CHECK(cudaSetDevice(best_device));
return true;
}
int8_t DecodeLetter(int8_t c, const Key & key, const int8_t * plugs)
{
int8_t r = mod26(key.sett.r_mesg - key.sett.r_ring);
int8_t m = mod26(key.sett.m_mesg - key.sett.m_ring);
int8_t l = mod26(key.sett.l_mesg - key.sett.l_ring);
int8_t g = mod26(key.sett.g_mesg - key.sett.g_ring);
c = plugs[c];
c = wiring.rotors[key.stru.r_slot][mod26(c + r)] - r;
c = wiring.rotors[key.stru.m_slot][mod26(c + m)] - m;
c = wiring.rotors[key.stru.l_slot][mod26(c + l)] - l;
c = wiring.rotors[key.stru.g_slot][mod26(c + g)] - g;
c = wiring.reflectors[key.stru.ukwnum][mod26(c)];
c = wiring.reverse_rotors[key.stru.g_slot][mod26(c + g)] - g;
c = wiring.reverse_rotors[key.stru.l_slot][mod26(c + l)] - l;
c = wiring.reverse_rotors[key.stru.m_slot][mod26(c + m)] - m;
c = wiring.reverse_rotors[key.stru.r_slot][mod26(c + r)] - r;
return plugs[mod26(c)];
}
string DecodeMessage(const string & ciphertext, const string & key_string,
const int8_t * plugs)
{
Key key;
key.FromString(key_string);
string result = ciphertext;
for (int i = 0; i < result.length(); i++)
{
key.Step();
result[i] = ToChar(DecodeLetter(ToNum(result[i]), key, plugs));
}
return LowerCase(result);
}
string DecodeMessage(const string & ciphertext, const string & key_string,
const string & plugboard_string)
{
Plugboard plugboard;
plugboard.FromString(plugboard_string);
return DecodeMessage(ciphertext, key_string, plugboard.plugs);
}
void CleanUpGPU()
{ } // nothing to clean up in the CUDA build
|
94678874b234f9889937d6fb224b5a949e81cd25.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ float sigma(float x) {
return x / (1 + ((x < 0) ? -x : x));
}
__global__ void calcAll(float *w, float *b, float *v, uint32_t *wo, uint32_t *lo, uint32_t *ls, uint32_t s) {
uint32_t id = (blockIdx.x * blockDim.x) + threadIdx.x;
if(id < s) {
uint32_t ln = 0;
while(id > lo[ln]) ln++;
float sum = 0;
for(uint32_t i = 0; i < ls[ln - 1]; i++) {
sum += w[wo[ln] + ((id - lo[ln]) * ls[ln - 1]) + i] * v[lo[ln - 1] + i];
}
v[id] = sigma(sum + b[id]);
}
}
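// Added annotation: each thread evaluates one neuron of a fully connected
// network; it locates its layer from the offset table lo, forms the dot
// product of its weight row with the previous layer's activations, adds the
// bias b[id] and applies the softsign-style activation sigma(x) = x / (1 + |x|).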
|
94678874b234f9889937d6fb224b5a949e81cd25.cu
|
#include "includes.h"
__device__ float sigma(float x) {
return x / (1 + ((x < 0) ? -x : x));
}
__global__ void calcAll(float *w, float *b, float *v, uint32_t *wo, uint32_t *lo, uint32_t *ls, uint32_t s) {
uint32_t id = (blockIdx.x * blockDim.x) + threadIdx.x;
if(id < s) {
uint32_t ln = 0;
while(id > lo[ln]) ln++;
float sum = 0;
for(uint32_t i = 0; i < ls[ln - 1]; i++) {
sum += w[wo[ln] + ((id - lo[ln]) * ls[ln - 1]) + i] * v[lo[ln - 1] + i];
}
v[id] = sigma(sum + b[id]);
}
}
|
993e72d96938dc14ce8bd712a5599e8fdbb2c0ef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common/book.h"
#include "./common/image.h"
#define DIM 1000
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b):r(a),i(b) {}
__device__ float magnitude2(void){ return r*r + i*i; }
__device__ hipComplex operator*(const hipComplex &a){
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex &a){
return hipComplex(r+a.r, i+a.i);
}
};
__device__
int julia( int x, int y ) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
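// Added annotation: this iterates z <- z*z + c with c = -0.8 + 0.156i for up
// to 200 steps and treats |z|^2 > 1000 as escape, so the kernel colours only
// the points judged to lie inside the Julia set.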
__global__
void kernel(unsigned char *ptr) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void) {
IMAGE bitmap( DIM, DIM );
unsigned char *dev_bitmap;
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap );
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
bitmap.save_image("julia_gpu.png");
return 0;
}
|
993e72d96938dc14ce8bd712a5599e8fdbb2c0ef.cu
|
#include "./common/book.h"
#include "./common/image.h"
#define DIM 1000
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b):r(a),i(b) {}
__device__ float magnitude2(void){ return r*r + i*i; }
__device__ cuComplex operator*(const cuComplex &a){
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex &a){
return cuComplex(r+a.r, i+a.i);
}
};
__device__
int julia( int x, int y ) {
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__
void kernel(unsigned char *ptr) {
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
// now calculate the value at that position
int juliaValue = julia( x, y );
ptr[offset*4 + 0] = 255 * juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 0;
ptr[offset*4 + 3] = 255;
}
int main(void) {
IMAGE bitmap( DIM, DIM );
unsigned char *dev_bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>( dev_bitmap );
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
bitmap.save_image("julia_gpu.png");
return 0;
}
|
b34656e9ba1bec2c37e4fc73e81b07611bddcc81.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"LRNLayer.cuh"
int CLRNLayerGPU::setup(std::vector<Blob<precision>*>& inputs,std::vector<Blob<precision>*>& outputs)
{
DL_ASSER(m_param.lrnParam.localSize%2!=0);
m_prePad = (m_param.lrnParam.localSize-1)/2;
if(m_param.lrnParam.normRegionType == ACROSS_CHANNELS)
{
m_scales.create(inputs[0]->num,inputs[0]->dataChannel,inputs[0]->dimHeight,inputs[0]->dimWidth);
outputs[0]->create(inputs[0]->num,inputs[0]->dataChannel,inputs[0]->dimHeight,inputs[0]->dimWidth);
}
return 0;
}
//block<<<batch,channels>>>
//threads<<<min(1024,dimHeight*dimWidth)>>>
__global__ void computScales(
precision* scales,
precision* inputs,
unsigned int dataSize,
int dimSize,
int padded,
int localSize,
precision alphaOver,
int k=1
)
{
unsigned int numOffset=blockIdx.x*dataSize;
int nChannelId=blockIdx.y;
unsigned int tid=threadIdx.x;
int first=max(0,nChannelId-padded);
int end=min(nChannelId+localSize-padded,gridDim.y);
for(unsigned int i=tid;i<dimSize;i+=blockDim.x)
{
precision tmp=0.0;
for(int c=first;c<end;c++)
tmp+=pow(inputs[numOffset+c*dimSize+i],2);
scales[numOffset+nChannelId*dimSize+i]=k+tmp*alphaOver;
}
}
//block<<<weightLeng/threadNum>>>
//thread<<<min(1024,weightLeng)>>>
__global__ void normalAcrossChannels(precision* inputDatas, precision* scales,precision* normalDatas,unsigned int dataLeng,precision beat)
{
for(int i = 0; i < dataLeng; i += blockDim.x * gridDim.x)
{
int id = i + blockIdx.x * blockDim.x + threadIdx.x;
if(id < dataLeng)
normalDatas[id] = inputDatas[id]*pow(scales[id],-beat);
}
}
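// Added annotation: together with computScales above this implements
// across-channel LRN, output = input * (k + (alpha / localSize) *
// sum_{j in window} input_j^2)^(-beta), where the exponent beta is the field
// spelled "beat" in this code and the channel window is clamped to the valid
// channel range.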
precision CLRNLayerGPU::feedforward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
int nRet=NET_SUCCESS;
if(m_param.lrnParam.normRegionType == ACROSS_CHANNELS)
nRet=crossChannelForward(bottoms,tops);
return nRet;
}
int CLRNLayerGPU::crossChannelForward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
int localSize=m_param.lrnParam.localSize;
precision alphaOver=m_param.lrnParam.alpha/localSize;
unsigned int dataSize=bottoms[0]->size()/bottoms[0]->num;
int dimSize=bottoms[0]->dimHeight*tops[0]->dimWidth;
dim3 blocks(bottoms[0]->num,bottoms[0]->dataChannel);
dim3 threads=min(1024,dimSize);
hipLaunchKernelGGL(( computScales), dim3(blocks),dim3(threads), 0, 0, m_scales.gpuData,
bottoms[0]->gpuData,
dataSize,
dimSize,
m_prePad,
m_param.lrnParam.localSize,
alphaOver);
hipError_t cudaStat=hipDeviceSynchronize();
CUDA_ERROR(cudaStat);
unsigned int leng=m_scales.size();
threads= min(1024,leng);
blocks = min(65535, (leng + threads.x - 1) / threads.x);
hipLaunchKernelGGL(( normalAcrossChannels), dim3(blocks),dim3(threads), 0, 0, bottoms[0]->gpuData,
m_scales.gpuData,
tops[0]->gpuData,
leng,
m_param.lrnParam.beat
);
cudaStat=hipDeviceSynchronize();
CUDA_ERROR(cudaStat);
return NET_SUCCESS;
}
int CLRNLayerGPU::backpropagation(std::vector<Blob<precision>*>& tops,std::vector<bool>& propagateDown,std::vector<Blob<precision>*>& bottoms)
{
int nRet=NET_SUCCESS;
if(m_param.lrnParam.normRegionType == ACROSS_CHANNELS)
nRet=crossChannelBack(tops,propagateDown,bottoms);
return nRet;
}
//block<<<batch,channels>>>
//threads<<<min(1024,dimHeight*dimWidth)>>>
__global__ void computBottomDiffs(
precision* topsData,
precision* topsDiff,
precision* scales,
precision* bottomsData,
precision* bottomsDiff,
unsigned int dataSize,
int dimSize,
int padded,
int localSize,
precision cacheRatioValue
)
{
unsigned int numOffset=blockIdx.x*dataSize;
int nChannelId=blockIdx.y;
unsigned int tid=threadIdx.x;
int first=max(0,nChannelId-padded);
int end=min(nChannelId+localSize-padded,gridDim.y);
for(unsigned int i=tid;i<dimSize;i+=blockDim.x)
{
precision tmp=0.0;
for(int c=first;c<end;c++)
{
int offset=numOffset+c*dimSize+i;
tmp+=topsDiff[offset]*topsData[offset]/scales[offset];
}
bottomsDiff[numOffset+nChannelId*dimSize+i]-=cacheRatioValue*(bottomsData[numOffset+nChannelId*dimSize+i]*tmp);
}
}
int CLRNLayerGPU::crossChannelBack(std::vector<Blob<precision>*>& tops,std::vector<bool>& propagateDown,std::vector<Blob<precision>*>& bottoms)
{
int channels=bottoms[0]->dataChannel;
precision beat=m_param.lrnParam.beat;
int localSize=m_param.lrnParam.localSize;
precision cacheRatioValue = 2 * m_param.lrnParam.alpha * beat / localSize;
unsigned int dataSize=bottoms[0]->size()/bottoms[0]->num;
int dimSize=bottoms[0]->dimHeight*bottoms[0]->dimWidth;
unsigned int leng=m_scales.size();
dim3 threads = min(1024,leng);
dim3 blocks = min(65535,(leng + threads.x - 1) / threads.x);
hipLaunchKernelGGL(( normalAcrossChannels), dim3(blocks),dim3(threads), 0, 0, tops[0]->gpuDiff,
m_scales.gpuData,
bottoms[0]->gpuDiff,
leng,
m_param.lrnParam.beat
);
hipError_t cudaStat=hipDeviceSynchronize();
CUDA_ERROR(cudaStat);
blocks=dim3(bottoms[0]->num,bottoms[0]->dataChannel);
threads=min(1024,dimSize);
hipLaunchKernelGGL(( computBottomDiffs), dim3(blocks),dim3(threads), 0, 0, tops[0]->gpuData,
tops[0]->gpuDiff,
m_scales.gpuData,
bottoms[0]->gpuData,
bottoms[0]->gpuDiff,
dataSize,
dimSize,
m_prePad,
localSize,
cacheRatioValue);
cudaStat=hipDeviceSynchronize();
CUDA_ERROR(cudaStat);
return NET_SUCCESS;
}
|
b34656e9ba1bec2c37e4fc73e81b07611bddcc81.cu
|
#include"LRNLayer.cuh"
int CLRNLayerGPU::setup(std::vector<Blob<precision>*>& inputs,std::vector<Blob<precision>*>& outputs)
{
DL_ASSER(m_param.lrnParam.localSize%2!=0);
m_prePad = (m_param.lrnParam.localSize-1)/2;
if(m_param.lrnParam.normRegionType == ACROSS_CHANNELS)
{
m_scales.create(inputs[0]->num,inputs[0]->dataChannel,inputs[0]->dimHeight,inputs[0]->dimWidth);
outputs[0]->create(inputs[0]->num,inputs[0]->dataChannel,inputs[0]->dimHeight,inputs[0]->dimWidth);
}
return 0;
}
//block<<<batch,channels>>>
//threads<<<min(1024,dimHeight*dimWidth)>>>
__global__ void computScales(
precision* scales,
precision* inputs,
unsigned int dataSize,
int dimSize,
int padded,
int localSize,
precision alphaOver,
int k=1
)
{
unsigned int numOffset=blockIdx.x*dataSize;
int nChannelId=blockIdx.y;
unsigned int tid=threadIdx.x;
int first=max(0,nChannelId-padded);
int end=min(nChannelId+localSize-padded,gridDim.y);
for(unsigned int i=tid;i<dimSize;i+=blockDim.x)
{
precision tmp=0.0;
for(int c=first;c<end;c++)
tmp+=pow(inputs[numOffset+c*dimSize+i],2);
scales[numOffset+nChannelId*dimSize+i]=k+tmp*alphaOver;
}
}
//block<<<weightLeng/threadNum>>>
//thread<<<min(1024,weightLeng)>>>
__global__ void normalAcrossChannels(precision* inputDatas, precision* scales,precision* normalDatas,unsigned int dataLeng,precision beat)
{
for(int i = 0; i < dataLeng; i += blockDim.x * gridDim.x)
{
int id = i + blockIdx.x * blockDim.x + threadIdx.x;
if(id < dataLeng)
normalDatas[id] = inputDatas[id]*pow(scales[id],-beat);
}
}
precision CLRNLayerGPU::feedforward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
int nRet=NET_SUCCESS;
if(m_param.lrnParam.normRegionType == ACROSS_CHANNELS)
nRet=crossChannelForward(bottoms,tops);
return nRet;
}
int CLRNLayerGPU::crossChannelForward(std::vector<Blob<precision>*>& bottoms,std::vector<Blob<precision>*>& tops)
{
int localSize=m_param.lrnParam.localSize;
precision alphaOver=m_param.lrnParam.alpha/localSize;
unsigned int dataSize=bottoms[0]->size()/bottoms[0]->num;
int dimSize=bottoms[0]->dimHeight*tops[0]->dimWidth;
dim3 blocks(bottoms[0]->num,bottoms[0]->dataChannel);
dim3 threads=min(1024,dimSize);
computScales<<<blocks,threads>>>(m_scales.gpuData,
bottoms[0]->gpuData,
dataSize,
dimSize,
m_prePad,
m_param.lrnParam.localSize,
alphaOver);
cudaError_t cudaStat=cudaDeviceSynchronize();
CUDA_ERROR(cudaStat);
unsigned int leng=m_scales.size();
threads= min(1024,leng);
blocks = min(65535, (leng + threads.x - 1) / threads.x);
normalAcrossChannels<<<blocks,threads>>>(bottoms[0]->gpuData,
m_scales.gpuData,
tops[0]->gpuData,
leng,
m_param.lrnParam.beat
);
cudaStat=cudaDeviceSynchronize();
CUDA_ERROR(cudaStat);
return NET_SUCCESS;
}
int CLRNLayerGPU::backpropagation(std::vector<Blob<precision>*>& tops,std::vector<bool>& propagateDown,std::vector<Blob<precision>*>& bottoms)
{
int nRet=NET_SUCCESS;
if(m_param.lrnParam.normRegionType == ACROSS_CHANNELS)
nRet=crossChannelBack(tops,propagateDown,bottoms);
return nRet;
}
//block<<<batch,channels>>>
//threads<<<min(1024,dimHeight*dimWidth)>>>
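// Subtracts the cross-channel term of the LRN gradient:
// bottomDiff[i] -= cacheRatioValue * bottomData[i] * sum over the local window of (topDiff * topData / scale).
// bottomDiff already holds topDiff * scale^(-beat) from the normalAcrossChannels call above,
// so after this kernel it contains the complete gradient w.r.t. the bottom data.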
__global__ void computBottomDiffs(
precision* topsData,
precision* topsDiff,
precision* scales,
precision* bottomsData,
precision* bottomsDiff,
unsigned int dataSize,
int dimSize,
int padded,
int localSize,
precision cacheRatioValue
)
{
unsigned int numOffset=blockIdx.x*dataSize;
int nChannelId=blockIdx.y;
unsigned int tid=threadIdx.x;
int first=max(0,nChannelId-padded);
int end=min(nChannelId+localSize-padded,gridDim.y);
for(unsigned int i=tid;i<dimSize;i+=blockDim.x)
{
precision tmp=0.0;
for(int c=first;c<end;c++)
{
int offset=numOffset+c*dimSize+i;
tmp+=topsDiff[offset]*topsData[offset]/scales[offset];
}
bottomsDiff[numOffset+nChannelId*dimSize+i]-=cacheRatioValue*(bottomsData[numOffset+nChannelId*dimSize+i]*tmp);
}
}
int CLRNLayerGPU::crossChannelBack(std::vector<Blob<precision>*>& tops,std::vector<bool>& propagateDown,std::vector<Blob<precision>*>& bottoms)
{
int channels=bottoms[0]->dataChannel;
precision beat=m_param.lrnParam.beat;
int localSize=m_param.lrnParam.localSize;
precision cacheRatioValue = 2 * m_param.lrnParam.alpha * beat / localSize;
unsigned int dataSize=bottoms[0]->size()/bottoms[0]->num;
int dimSize=bottoms[0]->dimHeight*bottoms[0]->dimWidth;
unsigned int leng=m_scales.size();
dim3 threads = min(1024,leng);
dim3 blocks = min(65535,(leng + threads.x - 1) / threads.x);
normalAcrossChannels<<<blocks,threads>>>(tops[0]->gpuDiff,
m_scales.gpuData,
bottoms[0]->gpuDiff,
leng,
m_param.lrnParam.beat
);
cudaError_t cudaStat=cudaDeviceSynchronize();
CUDA_ERROR(cudaStat);
blocks=dim3(bottoms[0]->num,bottoms[0]->dataChannel);
threads=min(1024,dimSize);
computBottomDiffs<<<blocks,threads>>>(tops[0]->gpuData,
tops[0]->gpuDiff,
m_scales.gpuData,
bottoms[0]->gpuData,
bottoms[0]->gpuDiff,
dataSize,
dimSize,
m_prePad,
localSize,
cacheRatioValue);
cudaStat=cudaDeviceSynchronize();
CUDA_ERROR(cudaStat);
return NET_SUCCESS;
}
|
94fb97865e54a303d31c061cc4a1d0876229cfd5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
#include <ATen/native/hip/zmath.cuh>
namespace at { namespace native {
namespace {
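// Integer exponentiation by squaring: squares the base and halves the exponent each
// iteration, multiplying into the result whenever the current low bit of the exponent is set.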
template <typename T>
static inline __host__ __device__ T powi(T a, T b) {
T result = 1;
while (b) {
if (b & 1) {
result *= a;
}
b /= 2;
a *= a;
}
return result;
}
// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.
// So we need to define the functions with the explicit function signatures.
// As for pow, the following signatures are defined as the device function:
// pow(float, int)
// pow(double, int)
// pow(float, float)
// pow(double, double)
// As for sqrt, the following signatures are defined as the device function:
// sqrt(float)
// sqrt(double)
// As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be
// applied to the result of the inline function, and thus the result is incorrect.
// e.g. if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get
// int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1
// However, the correct result is
// int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0
#ifdef _MSC_VER
// Functions for pow
// pow for at::Half
static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {
return static_cast<at::Half>(::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow (floating, floating/int)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type
pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
// pow (integral, integral)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_integral<Base_type>::value && std::is_same<Base_type, Exp_type>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return powi(base, exp);
}
// pow (Otherwise)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return static_cast<Base_type>(::pow(static_cast<double>(base), static_cast<double>(exp)));
}
// pow (Complex)
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return thrust::pow(base, exp);
}
// Functions for sqrt
// sqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return std::sqrt(x);
}
// sqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return static_cast<T>(std::sqrt(static_cast<double>(x)));
}
// Function for inverse sqrt
// invsqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return 1.0 / std::sqrt(x);
}
// invsqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return static_cast<T>(1.0 / std::sqrt(static_cast<double>(x)));
}
#else
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
template <typename T>
static inline __host__ __device__ T sqrt_(T x) {
return ::sqrt(x);
}
template <typename T>
static inline __host__ __device__ T invsqrt_(T x) {
return 1.0 / ::sqrt(x);
}
// Rely only on thrust for complex ops
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return thrust::pow(base, exp);
}
#endif
void pow_tensor_tensor_kernel(TensorIterator& iter) {
if (isComplexType(iter.dtype())) {
AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, [=]GPU_LAMBDA(thrust_t base, thrust_t exp) -> thrust_t {
return complex_pow_(base, exp);
});
});
} else if (isFloatingType(iter.dtype())) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "pow_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return powi(base, exp);
});
});
}
}
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIterator& iter,
Exp_type exp) {
const auto d_exp = static_cast<double>(exp);
if (d_exp == 0.5) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return sqrt_(base);
});
} else if (d_exp == 2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base;
});
} else if (d_exp == 3) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base * base;
});
} else if (d_exp == -0.5) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return invsqrt_(base);
});
} else if (d_exp == -1) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / base;
});
} else if (d_exp == -2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / (base * base);
});
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return pow_(base, exp);
});
}
}
void pow_tensor_scalar_kernel(TensorIterator& iter, Scalar exp_scalar) {
if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) {
AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
const auto exp = thrust_t(exp_scalar.to<scalar_t>());
gpu_kernel(iter, [=]GPU_LAMBDA(thrust_t base) -> thrust_t {
return complex_pow_(base, exp);
});
});
} else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
} else {
const auto exp = exp_scalar.to<float>();
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
}
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
|
94fb97865e54a303d31c061cc4a1d0876229cfd5.cu
|
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
#include <ATen/native/cuda/zmath.cuh>
namespace at { namespace native {
namespace {
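// Integer exponentiation by squaring: squares the base and halves the exponent each
// iteration, multiplying into the result whenever the current low bit of the exponent is set.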
template <typename T>
static inline __host__ __device__ T powi(T a, T b) {
T result = 1;
while (b) {
if (b & 1) {
result *= a;
}
b /= 2;
a *= a;
}
return result;
}
// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.
// So we need to define the functions with the explicit function signatures.
// As for pow, the following signatures are defined as the device function:
// pow(float, int)
// pow(double, int)
// pow(float, float)
// pow(double, double)
// As for sqrt, the following signatures are defined as the device function:
// sqrt(float)
// sqrt(double)
// As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be
// applied to the result of the inline function, and thus the result is incorrect.
// e.g. if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get
// int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1
// However, the correct result is
// int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0
#ifdef _MSC_VER
// Functions for pow
// pow for at::Half
static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {
return static_cast<at::Half>(std::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow (floating, floating/int)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type
pow_(Base_type base, Exp_type exp) {
return std::pow(base, exp);
}
// pow (integral, integral)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_integral<Base_type>::value && std::is_same<Base_type, Exp_type>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return powi(base, exp);
}
// pow (Otherwise)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return static_cast<Base_type>(std::pow(static_cast<double>(base), static_cast<double>(exp)));
}
// pow (Complex)
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return thrust::pow(base, exp);
}
// Functions for sqrt
// sqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return std::sqrt(x);
}
// sqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type sqrt_(T x) {
return static_cast<T>(std::sqrt(static_cast<double>(x)));
}
// Function for inverse sqrt
// invsqrt (floating)
template <typename T>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return 1.0 / std::sqrt(x);
}
// invsqrt (integral)
template <typename T>
static inline __host__ __device__ typename std::enable_if<!std::is_floating_point<T>::value, T>::type invsqrt_(T x) {
return static_cast<T>(1.0 / std::sqrt(static_cast<double>(x)));
}
#else
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {
return std::pow(base, exp);
}
template <typename T>
static inline __host__ __device__ T sqrt_(T x) {
return ::sqrt(x);
}
template <typename T>
static inline __host__ __device__ T invsqrt_(T x) {
return 1.0 / ::sqrt(x);
}
// Rely only on thrust for complex ops
template<typename B, typename E>
static inline __host__ __device__ B complex_pow_(B base, E exp) {
return thrust::pow(base, exp);
}
#endif
void pow_tensor_tensor_kernel(TensorIterator& iter) {
if (isComplexType(iter.dtype())) {
AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
gpu_kernel(iter, [=]GPU_LAMBDA(thrust_t base, thrust_t exp) -> thrust_t {
return complex_pow_(base, exp);
});
});
} else if (isFloatingType(iter.dtype())) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "pow_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return powi(base, exp);
});
});
}
}
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIterator& iter,
Exp_type exp) {
const auto d_exp = static_cast<double>(exp);
if (d_exp == 0.5) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return sqrt_(base);
});
} else if (d_exp == 2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base;
});
} else if (d_exp == 3) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base * base;
});
} else if (d_exp == -0.5) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return invsqrt_(base);
});
} else if (d_exp == -1) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / base;
});
} else if (d_exp == -2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / (base * base);
});
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return pow_(base, exp);
});
}
}
void pow_tensor_scalar_kernel(TensorIterator& iter, Scalar exp_scalar) {
if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) {
AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
const auto exp = thrust_t(exp_scalar.to<scalar_t>());
gpu_kernel(iter, [=]GPU_LAMBDA(thrust_t base) -> thrust_t {
return complex_pow_(base, exp);
});
});
} else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) {
AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
} else {
const auto exp = exp_scalar.to<float>();
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() {
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
}
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
|
2467115c0b3eca23ab55656c607ba03179c8bd99.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
__device__ void partition_by_bit(int *values, int bit);
__device__ int plus_scan(int *x);
__device__ void radix_sort(int *values);
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
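// Hillis-Steele inclusive prefix sum over blockDim.x elements of x, done in place;
// returns the inclusive sum at this thread's index.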
__device__ int plus_scan(int *x)
{
int i = threadIdx.x; // id of thread executing this instance
int n = blockDim.x; // total number of threads in this block
int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
int t;
if ( i >= offset )
t = x[i-offset];
__syncthreads();
if ( i >= offset )
x[i] = t + x[i];
__syncthreads();
}
return x[i];
}
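// Stable split on the given bit: elements whose bit is 0 are packed to the front and
// elements whose bit is 1 to the back, preserving relative order within each group;
// destination indices are derived from the inclusive scan above.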
__device__ void partition_by_bit(int *values, int bit)
{
int thread = threadIdx.x;
int size = blockDim.x;
int x_i = values[thread];
int p_i = (x_i >> bit) & 1;
values[thread] = p_i;
__syncthreads();
int T_before = plus_scan(values);
int T_total = values[size-1];
int F_total = size - T_total;
__syncthreads();
if ( p_i )
{
values[T_before-1 + F_total] = x_i;
}
else
{
values[thread - T_before] = x_i;
}
}
__device__ void radix_sort(int *values, int nBits, int begin)
{
int bit;
int size = nBits + begin;
for( bit = begin; bit < size; ++bit )
{
partition_by_bit(values, bit);
__syncthreads();
}
}
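// Note: the block-level kernels below assume n is a multiple of blockDim.x; with a
// partial last block, threads with i >= n would read uninitialized shared-memory slots
// (sortBlk/sortBlk2) or out-of-bounds global memory (scatterKernel).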
__global__ void sortBlk(int *in, int n, int *sortedBlocks, int bit, int nBins)
{
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
s[threadIdx.x] = (in[i] >> bit) & (nBins - 1);
}
__syncthreads();
radix_sort(s, 32, 0);
__syncthreads();
if(i < n)
{
sortedBlocks[i] = s[threadIdx.x];
}
__syncthreads();
}
__global__ void sortBlk2(int *in, int n, int *out, int bit, int nBins, int nBits)
{
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
s[threadIdx.x] = in[i];
}
__syncthreads();
radix_sort(s, nBits, bit);
__syncthreads();
if(i < n)
{
out[i] = s[threadIdx.x];
}
__syncthreads();
}
__global__ void computeHistKernel(int * in, int n, int * hist, int nBins, int gridSize)
{
extern __shared__ int s[];
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
s[i] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
int bin = in[i];
atomicAdd(&s[bin], 1);
}
__syncthreads();
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
atomicAdd(&hist[blockIdx.x + i * gridSize], s[i]);
}
__global__ void scanBlkKernel(int * in, int n, int * out)
{
//TODO
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
s[threadIdx.x] = in[i];
else
s[threadIdx.x] = 0;
__syncthreads();
int temp;
for(int stride = 1; stride < blockDim.x; stride *= 2)
{
if(threadIdx.x >= stride)
temp = s[threadIdx.x - stride];
__syncthreads();
if(threadIdx.x >= stride)
s[threadIdx.x] += temp;
__syncthreads();
}
if(i < n - 1)
out[i + 1] = s[threadIdx.x];
out[0] = 0;
}
__global__ void scatterKernel(int * in, int n, int *sortedBlocks, int *histScan, int * out, int gridSize)
{
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
s[threadIdx.x] = sortedBlocks[i];
}
__syncthreads();
int before = 0;
for(int j = threadIdx.x - 1; j >= 0; j--)
if(s[threadIdx.x] == s[j])
before++;
__syncthreads();
int index = blockIdx.x + sortedBlocks[i] * gridSize;
int rank = histScan[index] + before;
out[rank] = in[i];
}
__global__ void computeHistKernel2(int * src, int n, int * hist, int nBins, int bit)
{
// TODO
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s[];
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
s[i] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
int bin = (src[i] >> bit) & (nBins -1);
atomicAdd(&s[bin], 1);
}
__syncthreads();
// Each block adds its local hist to global hist using atomic on GMEM
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
atomicAdd(&hist[i], s[i]);
}
void sortParallel(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
int nBins = 1 << nBits; // 2^nBits
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
uint32_t * temp;
dim3 blockSize1(blockSizes[0]);
dim3 blockSize2(blockSizes[1]);
// Allocate device memories
int * d_hist, *d_histScan, * d_in, *d_sortedBlocks, *d_out, *d_k;
CHECK(hipMalloc(&d_in, n * sizeof(int)));
CHECK(hipMalloc(&d_out, n * sizeof(int)));
CHECK(hipMalloc(&d_sortedBlocks, n * sizeof(int)));
CHECK(hipMalloc(&d_k, n * sizeof(int)));
// Call kernel
dim3 gridSize1((n - 1) / blockSize1.x + 1);
dim3 gridSize2((n - 1) / blockSize2.x + 1);
CHECK(hipMalloc(&d_hist, nBins * gridSize1.x * sizeof(int)));
CHECK(hipMalloc(&d_histScan, nBins * gridSize1.x * sizeof(int)));
size_t smemSize = blockSize1.x*sizeof(int);
size_t smemSizeHist = nBins*sizeof(int);
hipStream_t stream1, stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
int * hist = (int *)malloc(nBins * gridSize1.x * sizeof(int));
int * histScan = (int *)malloc(nBins * gridSize1.x * sizeof(int));
GpuTimer timer;
int i = 0;
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
printf("%d: \n", i);
timer.Start();
CHECK(hipMemcpy(d_in, src, n * sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sortBlk), dim3(gridSize1), dim3(blockSize1), smemSize, stream1, d_in, n, d_sortedBlocks, bit, nBins);
hipLaunchKernelGGL(( sortBlk2), dim3(gridSize1), dim3(blockSize1), smemSize, stream2, d_in, n, d_k, bit, nBins, nBits);
hipDeviceSynchronize();
timer.Stop();
printf("Sort block: %.3f ms\n", timer.Elapsed());
// TODO: Compute "hist" of the current digit
timer.Start();
CHECK(hipMemset(d_hist, 0, nBins * gridSize1.x * sizeof(int)));
hipLaunchKernelGGL(( computeHistKernel), dim3(gridSize1), dim3(blockSize1), smemSizeHist, 0, d_sortedBlocks, n, d_hist, nBins, gridSize1.x);
CHECK(hipMemcpy(hist, d_hist, nBins * gridSize1.x * sizeof(int), hipMemcpyDeviceToHost));
timer.Stop();
printf("Hist: %.3f ms\n", timer.Elapsed());
//TODO: Scan "hist" (exclusively) and save the result to "histScan"
timer.Start();
histScan[0] = 0;
for (int bin = 1; bin < nBins * gridSize1.x; bin++)
histScan[bin] = histScan[bin - 1] + hist[bin - 1];
CHECK(hipMemcpy(d_histScan, histScan, nBins * gridSize1.x * sizeof(int), hipMemcpyHostToDevice));
timer.Stop();
printf("Scan: %.3f ms\n", timer.Elapsed());
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
timer.Start();
hipLaunchKernelGGL(( scatterKernel), dim3(gridSize1), dim3(blockSize1), smemSize, 0, d_k, n, d_sortedBlocks, d_histScan, d_out, gridSize1.x);
CHECK(hipMemcpy(dst, d_out, n * sizeof(int), hipMemcpyDeviceToHost));
timer.Stop();
printf("Scatter: %.3f ms\n", timer.Elapsed());
// TODO: Swap "src" and "dst"
temp = src;
src = dst;
dst = temp;
i++;
}
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(originalSrc);
free(hist);
free(histScan);
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
// Free device memories
CHECK(hipFree(d_in));
CHECK(hipFree(d_out));
CHECK(hipFree(d_hist));
CHECK(hipFree(d_histScan));
CHECK(hipFree(d_sortedBlocks));
CHECK(hipFree(d_k));
}
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort Satish parallel\n");
sortParallel(in, n, out, nBits, blockSizes);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
printf("%d\n", i);
printf("%d\n", out[i]);
printf("%d\n", correctOut[i]);
return;
}
}
printf("CORRECT\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 20);
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
sort(in, n, out, nBits, false, blockSizes);
//printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, correctOut, nBits, true, blockSizes);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
2467115c0b3eca23ab55656c607ba03179c8bd99.cu
|
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
__device__ void partition_by_bit(int *values, int bit);
__device__ int plus_scan(int *x);
__device__ void radix_sort(int *values);
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
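// Hillis-Steele inclusive prefix sum over blockDim.x elements of x, done in place;
// returns the inclusive sum at this thread's index.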
__device__ int plus_scan(int *x)
{
int i = threadIdx.x; // id of thread executing this instance
int n = blockDim.x; // total number of threads in this block
int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
int t;
if ( i >= offset )
t = x[i-offset];
__syncthreads();
if ( i >= offset )
x[i] = t + x[i];
__syncthreads();
}
return x[i];
}
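// Stable split on the given bit: elements whose bit is 0 are packed to the front and
// elements whose bit is 1 to the back, preserving relative order within each group;
// destination indices are derived from the inclusive scan above.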
__device__ void partition_by_bit(int *values, int bit)
{
int thread = threadIdx.x;
int size = blockDim.x;
int x_i = values[thread];
int p_i = (x_i >> bit) & 1;
values[thread] = p_i;
__syncthreads();
int T_before = plus_scan(values);
int T_total = values[size-1];
int F_total = size - T_total;
__syncthreads();
if ( p_i )
{
values[T_before-1 + F_total] = x_i;
}
else
{
values[thread - T_before] = x_i;
}
}
__device__ void radix_sort(int *values, int nBits, int begin)
{
int bit;
int size = nBits + begin;
for( bit = begin; bit < size; ++bit )
{
partition_by_bit(values, bit);
__syncthreads();
}
}
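// Note: the block-level kernels below assume n is a multiple of blockDim.x; with a
// partial last block, threads with i >= n would read uninitialized shared-memory slots
// (sortBlk/sortBlk2) or out-of-bounds global memory (scatterKernel).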
__global__ void sortBlk(int *in, int n, int *sortedBlocks, int bit, int nBins)
{
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
s[threadIdx.x] = (in[i] >> bit) & (nBins - 1);
}
__syncthreads();
radix_sort(s, 32, 0);
__syncthreads();
if(i < n)
{
sortedBlocks[i] = s[threadIdx.x];
}
__syncthreads();
}
__global__ void sortBlk2(int *in, int n, int *out, int bit, int nBins, int nBits)
{
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
s[threadIdx.x] = in[i];
}
__syncthreads();
radix_sort(s, nBits, bit);
__syncthreads();
if(i < n)
{
out[i] = s[threadIdx.x];
}
__syncthreads();
}
__global__ void computeHistKernel(int * in, int n, int * hist, int nBins, int gridSize)
{
extern __shared__ int s[];
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
s[i] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
int bin = in[i];
atomicAdd(&s[bin], 1);
}
__syncthreads();
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
atomicAdd(&hist[blockIdx.x + i * gridSize], s[i]);
}
__global__ void scanBlkKernel(int * in, int n, int * out)
{
//TODO
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
s[threadIdx.x] = in[i];
else
s[threadIdx.x] = 0;
__syncthreads();
int temp;
for(int stride = 1; stride < blockDim.x; stride *= 2)
{
if(threadIdx.x >= stride)
temp = s[threadIdx.x - stride];
__syncthreads();
if(threadIdx.x >= stride)
s[threadIdx.x] += temp;
__syncthreads();
}
if(i < n - 1)
out[i + 1] = s[threadIdx.x];
out[0] = 0;
}
__global__ void scatterKernel(int * in, int n, int *sortedBlocks, int *histScan, int * out, int gridSize)
{
extern __shared__ int s[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
s[threadIdx.x] = sortedBlocks[i];
}
__syncthreads();
int before = 0;
for(int j = threadIdx.x - 1; j >= 0; j--)
if(s[threadIdx.x] == s[j])
before++;
__syncthreads();
int index = blockIdx.x + sortedBlocks[i] * gridSize;
int rank = histScan[index] + before;
out[rank] = in[i];
}
__global__ void computeHistKernel2(int * src, int n, int * hist, int nBins, int bit)
{
// TODO
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s[];
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
s[i] = 0;
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
int bin = (src[i] >> bit) & (nBins -1);
atomicAdd(&s[bin], 1);
}
__syncthreads();
// Each block adds its local hist to global hist using atomic on GMEM
for(int i = threadIdx.x; i < nBins; i += blockDim.x)
atomicAdd(&hist[i], s[i]);
}
void sortParallel(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
int nBins = 1 << nBits; // 2^nBits
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
uint32_t * temp;
dim3 blockSize1(blockSizes[0]);
dim3 blockSize2(blockSizes[1]);
// Allocate device memories
int * d_hist, *d_histScan, * d_in, *d_sortedBlocks, *d_out, *d_k;
CHECK(cudaMalloc(&d_in, n * sizeof(int)));
CHECK(cudaMalloc(&d_out, n * sizeof(int)));
CHECK(cudaMalloc(&d_sortedBlocks, n * sizeof(int)));
CHECK(cudaMalloc(&d_k, n * sizeof(int)));
// Call kernel
dim3 gridSize1((n - 1) / blockSize1.x + 1);
dim3 gridSize2((n - 1) / blockSize2.x + 1);
CHECK(cudaMalloc(&d_hist, nBins * gridSize1.x * sizeof(int)));
CHECK(cudaMalloc(&d_histScan, nBins * gridSize1.x * sizeof(int)));
size_t smemSize = blockSize1.x*sizeof(int);
size_t smemSizeHist = nBins*sizeof(int);
cudaStream_t stream1, stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
int * hist = (int *)malloc(nBins * gridSize1.x * sizeof(int));
int * histScan = (int *)malloc(nBins * gridSize1.x * sizeof(int));
GpuTimer timer;
int i = 0;
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
printf("%d: \n", i);
timer.Start();
CHECK(cudaMemcpy(d_in, src, n * sizeof(int), cudaMemcpyHostToDevice));
sortBlk<<<gridSize1, blockSize1, smemSize, stream1>>>(d_in, n, d_sortedBlocks, bit, nBins);
sortBlk2<<<gridSize1, blockSize1, smemSize, stream2>>>(d_in, n, d_k, bit, nBins, nBits);
cudaDeviceSynchronize();
timer.Stop();
printf("Sort block: %.3f ms\n", timer.Elapsed());
// TODO: Compute "hist" of the current digit
timer.Start();
CHECK(cudaMemset(d_hist, 0, nBins * gridSize1.x * sizeof(int)));
computeHistKernel<<<gridSize1, blockSize1, smemSizeHist>>>(d_sortedBlocks, n, d_hist, nBins, gridSize1.x);
CHECK(cudaMemcpy(hist, d_hist, nBins * gridSize1.x * sizeof(int), cudaMemcpyDeviceToHost));
timer.Stop();
printf("Hist: %.3f ms\n", timer.Elapsed());
//TODO: Scan "hist" (exclusively) and save the result to "histScan"
timer.Start();
histScan[0] = 0;
for (int bin = 1; bin < nBins * gridSize1.x; bin++)
histScan[bin] = histScan[bin - 1] + hist[bin - 1];
CHECK(cudaMemcpy(d_histScan, histScan, nBins * gridSize1.x * sizeof(int), cudaMemcpyHostToDevice));
timer.Stop();
printf("Scan: %.3f ms\n", timer.Elapsed());
// TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
timer.Start();
scatterKernel<<<gridSize1, blockSize1, smemSize>>>(d_k, n, d_sortedBlocks, d_histScan, d_out, gridSize1.x);
CHECK(cudaMemcpy(dst, d_out, n * sizeof(int), cudaMemcpyDeviceToHost));
timer.Stop();
printf("Scatter: %.3f ms\n", timer.Elapsed());
// TODO: Swap "src" and "dst"
temp = src;
src = dst;
dst = temp;
i++;
}
// TODO: Copy result to "out"
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(originalSrc);
free(hist);
free(histScan);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
// Free device memories
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out));
CHECK(cudaFree(d_hist));
CHECK(cudaFree(d_histScan));
CHECK(cudaFree(d_sortedBlocks));
CHECK(cudaFree(d_k));
}
void sortByDevice(const uint32_t * in, int n,
uint32_t * out,
int nBits, int * blockSizes)
{
// TODO
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Radix sort
void sort(const uint32_t * in, int n,
uint32_t * out,
int nBits,
bool useDevice=false, int * blockSizes=NULL)
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
printf("\nRadix sort Satish parallel\n");
sortParallel(in, n, out, nBits, blockSizes);
}
else // use device
{
printf("\nRadix sort by device\n");
sortByDevice(in, n, out, nBits, blockSizes);
}
timer.Stop();
printf("Time: %.3f ms\n", timer.Elapsed());
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
for (int i = 0; i < n; i++)
{
if (out[i] != correctOut[i])
{
printf("INCORRECT :(\n");
printf("%d\n", i);
printf("%d\n", out[i]);
printf("%d\n", correctOut[i]);
return;
}
}
printf("CORRECT\n");
}
void printArray(uint32_t * a, int n)
{
for (int i = 0; i < n; i++)
printf("%i ", a[i]);
printf("\n");
}
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int n = (1 << 20);
printf("\nInput size: %d\n", n);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out = (uint32_t *)malloc(bytes); // Device result
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
// SET UP NBITS
int nBits = 4; // Default
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nNum bits per digit: %d\n", nBits);
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
sort(in, n, out, nBits, false, blockSizes);
//printArray(correctOut, n);
// SORT BY DEVICE
sort(in, n, correctOut, nBits, true, blockSizes);
checkCorrectness(out, correctOut, n);
// FREE MEMORIES
free(in);
free(out);
free(correctOut);
return EXIT_SUCCESS;
}
|
453029ad696aceb83450bcde4ca6d4bae764c000.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype maxval = -FLT_MAX;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
maxval = max(maxval, bottom_data[h * width + w]);
}
}
top_data[index] = maxval;
}
}
template <typename Dtype>
__global__ void MinPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype minval = FLT_MAX;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
minval = min(minval, bottom_data[h * width + w]);
}
}
top_data[index] = minval;
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
    // We start cumsum at FLT_MIN (rather than 0) to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, top_data);
break;
case PoolingParameter_PoolMethod_MIN:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MinPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
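// MaxPoolBackward: each bottom element accumulates top_diff from every pooled window that
// contains it and whose stored maximum equals its value (if several inputs tie for the
// maximum, each of them receives the full gradient).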
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_data, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
Dtype bottom_datum =
bottom_data[((n * channels + c) * height + h) * width + w];
top_data += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(bottom_datum == top_data[ph * pooled_width + pw]);
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void MinPoolBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_data, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
Dtype bottom_datum =
bottom_data[((n * channels + c) * height + h) * width + w];
top_data += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(bottom_datum == top_data[ph * pooled_width + pw]);
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int count = (*bottom)[0]->count();
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, bottom_diff);
break;
case PoolingParameter_PoolMethod_MIN:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MinPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
|
453029ad696aceb83450bcde4ca6d4bae764c000.cu
|
// Copyright 2014 BVLC and contributors.
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype maxval = -FLT_MAX;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
maxval = max(maxval, bottom_data[h * width + w]);
}
}
top_data[index] = maxval;
}
}
template <typename Dtype>
__global__ void MinPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype minval = FLT_MAX;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
minval = min(minval, bottom_data[h * width + w]);
}
}
top_data[index] = minval;
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride;
int hend = min(hstart + kernel_size, height);
int wstart = pw * stride;
int wend = min(wstart + kernel_size, width);
    // We start cumsum at FLT_MIN (rather than 0) to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
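    // Test-time stochastic pooling outputs the activation-weighted average: sum(x^2) / sum(x).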
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, top_data);
break;
case PoolingParameter_PoolMethod_MIN:
// NOLINT_NEXT_LINE(whitespace/operators)
MinPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_data, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
Dtype bottom_datum =
bottom_data[((n * channels + c) * height + h) * width + w];
top_data += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
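    // Accumulate gradient from every pooled output whose recorded maximum equals this input value.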
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(bottom_datum == top_data[ph * pooled_width + pw]);
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void MinPoolBackward(const int nthreads, const Dtype* bottom_data,
const Dtype* top_data, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
Dtype bottom_datum =
bottom_data[((n * channels + c) * height + h) * width + w];
top_data += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
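    // Accumulate gradient from every pooled output whose recorded minimum equals this input value.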
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(bottom_datum == top_data[ph * pooled_width + pw]);
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, const int pad,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
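    // Each overlapping pooled output passes back an equal 1/pool_size share of its gradient to this input.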
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride - pad;
int wstart = pw * stride - pad;
int hend = min(hstart + kernel_size, height + pad);
int wend = min(wstart + kernel_size, width + pad);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_size, const int stride, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_size) ? 0 : (h - kernel_size) / stride + 1;
int phend = min(h / stride + 1, pooled_height);
int pwstart = (w < kernel_size) ? 0 : (w - kernel_size) / stride + 1;
int pwend = min(w / stride + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
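    // Gradient flows only from pooled outputs whose sampled index (rand_idx) points at this input location.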
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int count = (*bottom)[0]->count();
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_size_, stride_,
pad_, bottom_diff);
break;
case PoolingParameter_PoolMethod_MIN:
// NOLINT_NEXT_LINE(whitespace/operators)
MinPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_size_, stride_, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
|
49dd82489711199791cc21166551a53fa1bddec5.hip
|
// !!! This is a file automatically generated by hipify!!!
#define _USE_MATH_DEFINES
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define DOF 1
#define CACHE_FIRSTBOUNCE 1
#define MATERIAL_SORT 0
#define ENABLE_AA 1
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
        // Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections_cache = NULL;
static int * dev_materialIds1 = NULL;
static int * dev_materialIds2 = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // TODO: initialize any extra device memory you need
hipMalloc(&dev_materialIds1, pixelcount * sizeof(int));
hipMemset(dev_materialIds1, 0, pixelcount * sizeof(int));
hipMalloc(&dev_materialIds2, pixelcount * sizeof(int));
hipMemset(dev_materialIds2, 0, pixelcount * sizeof(int));
hipMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
hipFree(dev_materialIds1);
hipFree(dev_materialIds2);
hipFree(dev_intersections_cache);
checkCUDAError("pathtraceFree");
}
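// Map a pair of uniform [0,1) samples onto the unit disk with a concentric mapping; used below for lens (depth-of-field) sampling.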
__host__ __device__ glm::vec2 ConcentricSampleDisk(float u1, float u2) {
float r, theta;
float sx = 2 * u1 - 1;
float sy = 2 * u2 - 1;
if (sx == 0 && sy == 0) return glm::vec2(0);
if (std::abs(sx) > std::abs(sy)) {
r = sx;
theta = (M_PI * 0.25f) * (sy / sx);
}
else {
r = sy;
theta = (M_PI * 0.5f) - (M_PI * 0.25f) * (sx / sy);
}
return r * glm::vec2(std::cos(theta), std::sin(theta));
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
glm::vec2 jitter(u01(rng), u01(rng));
float sx, sy;
if (ENABLE_AA){
//stochastic aa
sx = (float)x + jitter.x;
sy = (float)y + jitter.y;
}
else{
sx = (float)x;
sy = (float)y;
}
segment.ray.direction = glm::normalize(
cam.view
- cam.right * cam.pixelLength.x * (sx - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * (sy - (float)cam.resolution.y * 0.5f)
);
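        // Thin-lens depth of field: jitter the ray origin across the lens aperture and re-aim the ray at the focal plane.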
if (DOF && cam.lensRadius > 0.f) {
glm::vec2 lens = ConcentricSampleDisk(u01(rng), u01(rng));
lens.x *= cam.lensRadius;
lens.y *= cam.lensRadius;
float ft = glm::abs(cam.focalDistance / segment.ray.direction.z);
glm::vec3 Pfocus = segment.ray.origin + segment.ray.direction * ft;
segment.ray.origin += lens.x * cam.right + lens.y * cam.up;
segment.ray.direction = glm::normalize(Pfocus - segment.ray.origin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
//
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].intersectPoint = intersect_point;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeMaterial(
int iter
, int depth
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (pathSegments[idx].remainingBounces > 0 && intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
else {
scatterRay(pathSegments[idx], intersection.intersectPoint, intersection.surfaceNormal, material, makeSeededRandomEngine(iter, idx, 0), iter, depth);
}
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
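    // Only paths that terminated this bounce are accumulated; they are stream-compacted away right after this kernel runs.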
if (index < nPaths && iterationPaths[index].remainingBounces == 0)
{
image[iterationPaths[index].pixelIndex] += iterationPaths[index].color;
}
}
struct terminatePath
{
__host__ __device__
bool operator()(const PathSegment& pathSeg)
{
return (pathSeg.remainingBounces == 0);
}
};
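// Fill two identical key buffers with each intersection's material id; the two sort_by_key calls below each consume (and permute) one copy.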
__global__ void kernSetMaterialIds(int nPaths, int* dev_materialIds1, int* dev_materialIds2, ShadeableIntersection* dev_intersections) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= nPaths) return;
dev_materialIds1[index] = dev_materialIds2[index] = dev_intersections[index].materialId;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int pathsFlight = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
float computeMilliseconds = 0.f, time = 0.f, sortMilliseconds = 0.f;
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
hipEventRecord(start);
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
        if ((CACHE_FIRSTBOUNCE && iter == 1 && depth == 0) || depth > 0) {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
}
        if (!CACHE_FIRSTBOUNCE && depth == 0) // the branch above already covers depth > 0
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
if (CACHE_FIRSTBOUNCE && iter == 1 && depth == 0) {
hipMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
else if (CACHE_FIRSTBOUNCE && iter > 1 && depth == 0) {
hipMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice);
}
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&computeMilliseconds, start, stop);
time += computeMilliseconds;
//
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
//need two different device variables
if (MATERIAL_SORT){
hipEventRecord(start);
kernSetMaterialIds << <numblocksPathSegmentTracing, blockSize1d >> >(num_paths, dev_materialIds1, dev_materialIds2, dev_intersections);
checkCUDAError("set material ids");
thrust::sort_by_key(thrust::device, dev_materialIds1, dev_materialIds1 + num_paths, dev_paths);
thrust::sort_by_key(thrust::device, dev_materialIds2, dev_materialIds2 + num_paths, dev_intersections);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&sortMilliseconds, start, stop);
time += sortMilliseconds;
}
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
depth,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
dim3 numBlocksPixels = (num_paths + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
PathSegment* newPathEnd = thrust::remove_if(
thrust::device,
dev_paths,
dev_paths + num_paths,
terminatePath());
num_paths = newPathEnd - dev_paths;
iterationComplete = depth >= traceDepth || num_paths == 0;
}
std::cout << "compute intersection took: " << computeMilliseconds << " ms" << std::endl;
std::cout << "sort intersection took: " << computeMilliseconds << " ms" << std::endl;
std::cout << "total time: " << time << " ms" << std::endl;
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
49dd82489711199791cc21166551a53fa1bddec5.cu
|
#define _USE_MATH_DEFINES
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define DOF 1
#define CACHE_FIRSTBOUNCE 1
#define MATERIAL_SORT 0
#define ENABLE_AA 1
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
        // Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static ShadeableIntersection * dev_intersections_cache = NULL;
static int * dev_materialIds1 = NULL;
static int * dev_materialIds2 = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
    // TODO: initialize any extra device memory you need
cudaMalloc(&dev_materialIds1, pixelcount * sizeof(int));
cudaMemset(dev_materialIds1, 0, pixelcount * sizeof(int));
cudaMalloc(&dev_materialIds2, pixelcount * sizeof(int));
cudaMemset(dev_materialIds2, 0, pixelcount * sizeof(int));
cudaMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
cudaFree(dev_materialIds1);
cudaFree(dev_materialIds2);
cudaFree(dev_intersections_cache);
checkCUDAError("pathtraceFree");
}
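// Map a pair of uniform [0,1) samples onto the unit disk with a concentric mapping; used below for lens (depth-of-field) sampling.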
__host__ __device__ glm::vec2 ConcentricSampleDisk(float u1, float u2) {
float r, theta;
float sx = 2 * u1 - 1;
float sy = 2 * u2 - 1;
if (sx == 0 && sy == 0) return glm::vec2(0);
if (std::abs(sx) > std::abs(sy)) {
r = sx;
theta = (M_PI * 0.25f) * (sy / sx);
}
else {
r = sy;
theta = (M_PI * 0.5f) - (M_PI * 0.25f) * (sx / sy);
}
return r * glm::vec2(std::cos(theta), std::sin(theta));
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
glm::vec2 jitter(u01(rng), u01(rng));
float sx, sy;
if (ENABLE_AA){
//stochastic aa
sx = (float)x + jitter.x;
sy = (float)y + jitter.y;
}
else{
sx = (float)x;
sy = (float)y;
}
segment.ray.direction = glm::normalize(
cam.view
- cam.right * cam.pixelLength.x * (sx - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * (sy - (float)cam.resolution.y * 0.5f)
);
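        // Thin-lens depth of field: jitter the ray origin across the lens aperture and re-aim the ray at the focal plane.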
if (DOF && cam.lensRadius > 0.f) {
glm::vec2 lens = ConcentricSampleDisk(u01(rng), u01(rng));
lens.x *= cam.lensRadius;
lens.y *= cam.lensRadius;
float ft = glm::abs(cam.focalDistance / segment.ray.direction.z);
glm::vec3 Pfocus = segment.ray.origin + segment.ray.direction * ft;
segment.ray.origin += lens.x * cam.right + lens.y * cam.up;
segment.ray.direction = glm::normalize(Pfocus - segment.ray.origin);
}
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
//
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
intersections[path_index].intersectPoint = intersect_point;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeMaterial(
int iter
, int depth
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (pathSegments[idx].remainingBounces > 0 && intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
else {
scatterRay(pathSegments[idx], intersection.intersectPoint, intersection.surfaceNormal, material, makeSeededRandomEngine(iter, idx, 0), iter, depth);
}
}
else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
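    // Only paths that terminated this bounce are accumulated; they are stream-compacted away right after this kernel runs.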
if (index < nPaths && iterationPaths[index].remainingBounces == 0)
{
image[iterationPaths[index].pixelIndex] += iterationPaths[index].color;
}
}
struct terminatePath
{
__host__ __device__
bool operator()(const PathSegment& pathSeg)
{
return (pathSeg.remainingBounces == 0);
}
};
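// Fill two identical key buffers with each intersection's material id; the two sort_by_key calls below each consume (and permute) one copy.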
__global__ void kernSetMaterialIds(int nPaths, int* dev_materialIds1, int* dev_materialIds2, ShadeableIntersection* dev_intersections) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= nPaths) return;
dev_materialIds1[index] = dev_materialIds2[index] = dev_intersections[index].materialId;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int pathsFlight = num_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
float computeMilliseconds = 0.f, time = 0.f, sortMilliseconds = 0.f;
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
cudaEventRecord(start);
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
        if ((CACHE_FIRSTBOUNCE && iter == 1 && depth == 0) || depth > 0) {
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
}
        if (!CACHE_FIRSTBOUNCE && depth == 0) // the branch above already covers depth > 0
computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
if (CACHE_FIRSTBOUNCE && iter == 1 && depth == 0) {
cudaMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
else if (CACHE_FIRSTBOUNCE && iter > 1 && depth == 0) {
cudaMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice);
}
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&computeMilliseconds, start, stop);
time += computeMilliseconds;
//
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
//need two different device variables
if (MATERIAL_SORT){
cudaEventRecord(start);
kernSetMaterialIds << <numblocksPathSegmentTracing, blockSize1d >> >(num_paths, dev_materialIds1, dev_materialIds2, dev_intersections);
checkCUDAError("set material ids");
thrust::sort_by_key(thrust::device, dev_materialIds1, dev_materialIds1 + num_paths, dev_paths);
thrust::sort_by_key(thrust::device, dev_materialIds2, dev_materialIds2 + num_paths, dev_intersections);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&sortMilliseconds, start, stop);
time += sortMilliseconds;
}
shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > (
iter,
depth,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
dim3 numBlocksPixels = (num_paths + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);
PathSegment* newPathEnd = thrust::remove_if(
thrust::device,
dev_paths,
dev_paths + num_paths,
terminatePath());
num_paths = newPathEnd - dev_paths;
iterationComplete = depth >= traceDepth || num_paths == 0;
}
std::cout << "compute intersection took: " << computeMilliseconds << " ms" << std::endl;
std::cout << "sort intersection took: " << computeMilliseconds << " ms" << std::endl;
std::cout << "total time: " << time << " ms" << std::endl;
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
615e28f35b9749b8ff6908cd218f400b5cb0e4aa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "average_relative_flow.h"
#include "utils/sampling_helpers.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/mpi_types.h>
namespace mirheo
{
namespace AverageRelativeFlowKernels
{
__global__ void sampleRelative(
PVview pvView, CellListInfo cinfo,
real* avgDensity,
ChannelsInfo channelsInfo,
real3 relativePoint)
{
const int pid = threadIdx.x + blockIdx.x*blockDim.x;
if (pid >= pvView.size) return;
real3 r = make_real3(pvView.readPosition(pid));
r -= relativePoint;
int3 cid3 = cinfo.getCellIdAlongAxes<CellListsProjection::NoClamp>(r);
cid3 = (cid3 + cinfo.ncells) % cinfo.ncells;
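    // The NoClamp projection can yield negative or out-of-range indices; adding ncells and taking the modulo wraps them back into the grid.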
const int cid = cinfo.encode(cid3);
atomicAdd(avgDensity + cid, 1);
SamplingHelpersKernels::sampleChannels(pid, cid, channelsInfo);
}
} // namespace AverageRelativeFlowKernels
AverageRelative3D::AverageRelative3D(
const MirState *state, std::string name, std::vector<std::string> pvNames,
std::vector<std::string> channelNames, int sampleEvery,
int dumpEvery, real3 binSize, std::string relativeOVname, int relativeID) :
Average3D(state, name, pvNames, channelNames, sampleEvery, dumpEvery, binSize),
relativeOVname_(relativeOVname),
relativeID_(relativeID)
{}
void AverageRelative3D::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
Average3D::setup(simulation, comm, interComm);
int local_size = numberDensity_.size();
int global_size = local_size * nranks_;
localNumberDensity_ .resize(local_size);
numberDensity_ .resize_anew(global_size);
accumulatedNumberDensity_.resize_anew(global_size);
numberDensity_.clear(defaultStream);
localChannels_.resize(channelsInfo_.n);
for (int i = 0; i < channelsInfo_.n; ++i)
{
local_size = channelsInfo_.average[i].size();
global_size = local_size * nranks_;
localChannels_[i].resize(local_size);
channelsInfo_.average[i].resize_anew(global_size);
accumulatedAverage_ [i].resize_anew(global_size);
channelsInfo_.average[i].clear(defaultStream);
channelsInfo_.averagePtrs[i] = channelsInfo_.average[i].devPtr();
}
channelsInfo_.averagePtrs.uploadToDevice(defaultStream);
channelsInfo_.types.uploadToDevice(defaultStream);
// Relative stuff
relativeOV_ = simulation->getOVbyNameOrDie(relativeOVname_);
if ( !relativeOV_->local()->dataPerObject.checkChannelExists(ChannelNames::motions) )
die("Only rigid objects are supported for relative flow, but got OV '%s'", relativeOV_->getCName());
const int locsize = relativeOV_->local()->getNumObjects();
int totsize {0};
MPI_Check( MPI_Reduce(&locsize, &totsize, 1, MPI_INT, MPI_SUM, 0, comm) );
if (rank_ == 0 && relativeID_ >= totsize)
die("Too few objects in OV '%s' (only %d); but requested id %d",
relativeOV_->getCName(), totsize, relativeID_);
}
void AverageRelative3D::sampleOnePv(real3 relativeParam, ParticleVector *pv, hipStream_t stream)
{
const CellListInfo cinfo(binSize_, getState()->domain.globalSize);
PVview pvView(pv, pv->local());
ChannelsInfo gpuInfo(channelsInfo_, pv, stream);
const int nthreads = 128;
SAFE_KERNEL_LAUNCH
(AverageRelativeFlowKernels::sampleRelative,
getNblocks(pvView.size, nthreads), nthreads, 0, stream,
pvView, cinfo, numberDensity_.devPtr(), gpuInfo, relativeParam);
}
void AverageRelative3D::afterIntegration(hipStream_t stream)
{
const int TAG = 22;
const int NCOMPONENTS = 2 * sizeof(real3) / sizeof(real);
if (!isTimeEvery(getState(), sampleEvery_)) return;
debug2("Plugin %s is sampling now", getCName());
real3 relativeParams[2] = {make_real3(0.0_r), make_real3(0.0_r)};
// Find and broadcast the position and velocity of the relative object
MPI_Request req;
MPI_Check( MPI_Irecv(relativeParams, NCOMPONENTS, getMPIFloatType<real>(), MPI_ANY_SOURCE, TAG, comm_, &req) );
auto ids = relativeOV_->local()->dataPerObject.getData<int64_t>(ChannelNames::globalIds);
auto motions = relativeOV_->local()->dataPerObject.getData<RigidMotion>(ChannelNames::motions);
ids ->downloadFromDevice(stream, ContainersSynch::Asynch);
motions->downloadFromDevice(stream, ContainersSynch::Synch);
for (size_t i = 0; i < ids->size(); i++)
{
if ((*ids)[i] == relativeID_)
{
real3 params[2] = { make_real3( (*motions)[i].r ),
make_real3( (*motions)[i].vel ) };
params[0] = getState()->domain.local2global(params[0]);
for (int r = 0; r < nranks_; r++)
MPI_Send(¶ms, NCOMPONENTS, getMPIFloatType<real>(), r, TAG, comm_);
break;
}
}
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
relativeParams[0] = getState()->domain.global2local(relativeParams[0]);
for (auto& pv : pvs_)
sampleOnePv(relativeParams[0], pv, stream);
accumulateSampledAndClear(stream);
averageRelativeVelocity_ += relativeParams[1];
nSamples_++;
}
void AverageRelative3D::extractLocalBlock()
{
static const double scale_by_density = -1.0;
auto oneChannel = [this] (const PinnedBuffer<double>& channel, Average3D::ChannelType type, double scale, std::vector<double>& dest) {
MPI_Check( MPI_Allreduce(MPI_IN_PLACE, channel.hostPtr(), channel.size(), MPI_DOUBLE, MPI_SUM, comm_) );
const int ncomponents = this->getNcomponents(type);
const int3 globalResolution = resolution_ * nranks3D_;
double factor;
int dstId = 0;
for (int k = rank3D_.z * resolution_.z; k < (rank3D_.z+1) * resolution_.z; ++k)
{
for (int j = rank3D_.y * resolution_.y; j < (rank3D_.y+1) * resolution_.y; ++j)
{
for (int i = rank3D_.x * resolution_.x; i < (rank3D_.x+1) * resolution_.x; ++i)
{
const int scalId = (k*globalResolution.y*globalResolution.x + j*globalResolution.x + i);
int srcId = ncomponents * scalId;
for (int c = 0; c < ncomponents; ++c)
{
if (scale == scale_by_density) factor = 1.0_r / accumulatedNumberDensity_[scalId];
else factor = scale;
dest[dstId++] = channel[srcId] * factor;
srcId++;
}
}
}
}
};
// Order is important! Density comes first
oneChannel(accumulatedNumberDensity_, Average3D::ChannelType::Scalar, 1.0 / (nSamples_ * binSize_.x*binSize_.y*binSize_.z), localNumberDensity_);
for (int i = 0; i < channelsInfo_.n; ++i)
oneChannel(accumulatedAverage_[i], channelsInfo_.types[i], scale_by_density, localChannels_[i]);
}
void AverageRelative3D::serializeAndSend(hipStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
for (int i = 0; i < channelsInfo_.n; ++i)
{
auto& data = accumulatedAverage_[i];
if (channelsInfo_.names[i] == ChannelNames::velocities)
{
constexpr int nthreads = 128;
const int numVec3 = data.size() / 3;
SAFE_KERNEL_LAUNCH
(SamplingHelpersKernels::correctVelocity,
getNblocks(numVec3, nthreads), nthreads, 0, stream,
numVec3, reinterpret_cast<double3*> (data.devPtr()),
accumulatedNumberDensity_.devPtr(), averageRelativeVelocity_ / static_cast<real>(nSamples_));
averageRelativeVelocity_ = make_real3(0.0_r);
}
}
accumulatedNumberDensity_.downloadFromDevice(stream, ContainersSynch::Asynch);
accumulatedNumberDensity_.clearDevice(stream);
for (auto& data : accumulatedAverage_)
{
data.downloadFromDevice(stream, ContainersSynch::Asynch);
data.clearDevice(stream);
}
CUDA_Check( hipStreamSynchronize(stream) );
extractLocalBlock();
nSamples_ = 0;
MirState::StepType timeStamp = getTimeStamp(getState(), dumpEvery_) - 1; // -1 to start from 0
debug2("Plugin '%s' is now packing the data", getCName());
waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, timeStamp, localNumberDensity_, localChannels_);
send(sendBuffer_);
}
} // namespace mirheo
|
615e28f35b9749b8ff6908cd218f400b5cb0e4aa.cu
|
#include "average_relative_flow.h"
#include "utils/sampling_helpers.h"
#include "utils/simple_serializer.h"
#include "utils/time_stamp.h"
#include <mirheo/core/celllist.h>
#include <mirheo/core/pvs/object_vector.h>
#include <mirheo/core/pvs/views/pv.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/mpi_types.h>
namespace mirheo
{
namespace AverageRelativeFlowKernels
{
__global__ void sampleRelative(
PVview pvView, CellListInfo cinfo,
real* avgDensity,
ChannelsInfo channelsInfo,
real3 relativePoint)
{
const int pid = threadIdx.x + blockIdx.x*blockDim.x;
if (pid >= pvView.size) return;
real3 r = make_real3(pvView.readPosition(pid));
r -= relativePoint;
int3 cid3 = cinfo.getCellIdAlongAxes<CellListsProjection::NoClamp>(r);
cid3 = (cid3 + cinfo.ncells) % cinfo.ncells;
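    // The NoClamp projection can yield negative or out-of-range indices; adding ncells and taking the modulo wraps them back into the grid.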
const int cid = cinfo.encode(cid3);
atomicAdd(avgDensity + cid, 1);
SamplingHelpersKernels::sampleChannels(pid, cid, channelsInfo);
}
} // namespace AverageRelativeFlowKernels
AverageRelative3D::AverageRelative3D(
const MirState *state, std::string name, std::vector<std::string> pvNames,
std::vector<std::string> channelNames, int sampleEvery,
int dumpEvery, real3 binSize, std::string relativeOVname, int relativeID) :
Average3D(state, name, pvNames, channelNames, sampleEvery, dumpEvery, binSize),
relativeOVname_(relativeOVname),
relativeID_(relativeID)
{}
void AverageRelative3D::setup(Simulation* simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
Average3D::setup(simulation, comm, interComm);
int local_size = numberDensity_.size();
int global_size = local_size * nranks_;
localNumberDensity_ .resize(local_size);
numberDensity_ .resize_anew(global_size);
accumulatedNumberDensity_.resize_anew(global_size);
numberDensity_.clear(defaultStream);
localChannels_.resize(channelsInfo_.n);
for (int i = 0; i < channelsInfo_.n; ++i)
{
local_size = channelsInfo_.average[i].size();
global_size = local_size * nranks_;
localChannels_[i].resize(local_size);
channelsInfo_.average[i].resize_anew(global_size);
accumulatedAverage_ [i].resize_anew(global_size);
channelsInfo_.average[i].clear(defaultStream);
channelsInfo_.averagePtrs[i] = channelsInfo_.average[i].devPtr();
}
channelsInfo_.averagePtrs.uploadToDevice(defaultStream);
channelsInfo_.types.uploadToDevice(defaultStream);
// Relative stuff
relativeOV_ = simulation->getOVbyNameOrDie(relativeOVname_);
if ( !relativeOV_->local()->dataPerObject.checkChannelExists(ChannelNames::motions) )
die("Only rigid objects are supported for relative flow, but got OV '%s'", relativeOV_->getCName());
const int locsize = relativeOV_->local()->getNumObjects();
int totsize {0};
MPI_Check( MPI_Reduce(&locsize, &totsize, 1, MPI_INT, MPI_SUM, 0, comm) );
if (rank_ == 0 && relativeID_ >= totsize)
die("Too few objects in OV '%s' (only %d); but requested id %d",
relativeOV_->getCName(), totsize, relativeID_);
}
void AverageRelative3D::sampleOnePv(real3 relativeParam, ParticleVector *pv, cudaStream_t stream)
{
const CellListInfo cinfo(binSize_, getState()->domain.globalSize);
PVview pvView(pv, pv->local());
ChannelsInfo gpuInfo(channelsInfo_, pv, stream);
const int nthreads = 128;
SAFE_KERNEL_LAUNCH
(AverageRelativeFlowKernels::sampleRelative,
getNblocks(pvView.size, nthreads), nthreads, 0, stream,
pvView, cinfo, numberDensity_.devPtr(), gpuInfo, relativeParam);
}
void AverageRelative3D::afterIntegration(cudaStream_t stream)
{
const int TAG = 22;
const int NCOMPONENTS = 2 * sizeof(real3) / sizeof(real);
if (!isTimeEvery(getState(), sampleEvery_)) return;
debug2("Plugin %s is sampling now", getCName());
real3 relativeParams[2] = {make_real3(0.0_r), make_real3(0.0_r)};
// Find and broadcast the position and velocity of the relative object
MPI_Request req;
MPI_Check( MPI_Irecv(relativeParams, NCOMPONENTS, getMPIFloatType<real>(), MPI_ANY_SOURCE, TAG, comm_, &req) );
auto ids = relativeOV_->local()->dataPerObject.getData<int64_t>(ChannelNames::globalIds);
auto motions = relativeOV_->local()->dataPerObject.getData<RigidMotion>(ChannelNames::motions);
ids ->downloadFromDevice(stream, ContainersSynch::Asynch);
motions->downloadFromDevice(stream, ContainersSynch::Synch);
for (size_t i = 0; i < ids->size(); i++)
{
if ((*ids)[i] == relativeID_)
{
real3 params[2] = { make_real3( (*motions)[i].r ),
make_real3( (*motions)[i].vel ) };
params[0] = getState()->domain.local2global(params[0]);
for (int r = 0; r < nranks_; r++)
MPI_Send(¶ms, NCOMPONENTS, getMPIFloatType<real>(), r, TAG, comm_);
break;
}
}
MPI_Check( MPI_Wait(&req, MPI_STATUS_IGNORE) );
relativeParams[0] = getState()->domain.global2local(relativeParams[0]);
for (auto& pv : pvs_)
sampleOnePv(relativeParams[0], pv, stream);
accumulateSampledAndClear(stream);
averageRelativeVelocity_ += relativeParams[1];
nSamples_++;
}
void AverageRelative3D::extractLocalBlock()
{
static const double scale_by_density = -1.0;
auto oneChannel = [this] (const PinnedBuffer<double>& channel, Average3D::ChannelType type, double scale, std::vector<double>& dest) {
MPI_Check( MPI_Allreduce(MPI_IN_PLACE, channel.hostPtr(), channel.size(), MPI_DOUBLE, MPI_SUM, comm_) );
const int ncomponents = this->getNcomponents(type);
const int3 globalResolution = resolution_ * nranks3D_;
double factor;
int dstId = 0;
for (int k = rank3D_.z * resolution_.z; k < (rank3D_.z+1) * resolution_.z; ++k)
{
for (int j = rank3D_.y * resolution_.y; j < (rank3D_.y+1) * resolution_.y; ++j)
{
for (int i = rank3D_.x * resolution_.x; i < (rank3D_.x+1) * resolution_.x; ++i)
{
const int scalId = (k*globalResolution.y*globalResolution.x + j*globalResolution.x + i);
int srcId = ncomponents * scalId;
for (int c = 0; c < ncomponents; ++c)
{
if (scale == scale_by_density) factor = 1.0_r / accumulatedNumberDensity_[scalId];
else factor = scale;
dest[dstId++] = channel[srcId] * factor;
srcId++;
}
}
}
}
};
// Order is important! Density comes first
oneChannel(accumulatedNumberDensity_, Average3D::ChannelType::Scalar, 1.0 / (nSamples_ * binSize_.x*binSize_.y*binSize_.z), localNumberDensity_);
for (int i = 0; i < channelsInfo_.n; ++i)
oneChannel(accumulatedAverage_[i], channelsInfo_.types[i], scale_by_density, localChannels_[i]);
}
void AverageRelative3D::serializeAndSend(cudaStream_t stream)
{
if (!isTimeEvery(getState(), dumpEvery_)) return;
for (int i = 0; i < channelsInfo_.n; ++i)
{
auto& data = accumulatedAverage_[i];
if (channelsInfo_.names[i] == ChannelNames::velocities)
{
constexpr int nthreads = 128;
const int numVec3 = data.size() / 3;
SAFE_KERNEL_LAUNCH
(SamplingHelpersKernels::correctVelocity,
getNblocks(numVec3, nthreads), nthreads, 0, stream,
numVec3, reinterpret_cast<double3*> (data.devPtr()),
accumulatedNumberDensity_.devPtr(), averageRelativeVelocity_ / static_cast<real>(nSamples_));
averageRelativeVelocity_ = make_real3(0.0_r);
}
}
accumulatedNumberDensity_.downloadFromDevice(stream, ContainersSynch::Asynch);
accumulatedNumberDensity_.clearDevice(stream);
for (auto& data : accumulatedAverage_)
{
data.downloadFromDevice(stream, ContainersSynch::Asynch);
data.clearDevice(stream);
}
CUDA_Check( cudaStreamSynchronize(stream) );
extractLocalBlock();
nSamples_ = 0;
MirState::StepType timeStamp = getTimeStamp(getState(), dumpEvery_) - 1; // -1 to start from 0
debug2("Plugin '%s' is now packing the data", getCName());
waitPrevSend();
SimpleSerializer::serialize(sendBuffer_, getState()->currentTime, timeStamp, localNumberDensity_, localChannels_);
send(sendBuffer_);
}
} // namespace mirheo
|
d99727e17eae473d3659b7e238ac9db4d03e903d.hip
|
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include <matx.h>
using namespace matx;
int main() {
tensorShape_t<2> shape({2, 3});
tensor_t<float, 2> A(shape);
tensor_t<float, 2> B(shape);
tensor_t<float, 1> V({3});
/****************************************************************************************************
* Initialize tensor A with increasing values from 0.5 to 3.0 in steps of 0.5,
* and tensor V from -1 to -3 in steps of -1.
****************************************************************************************************/
A.SetVals({{0.5, 1, 1.5}, {2.0, 2.5, 3.0}});
V.SetVals({-1, -2, -3});
/*** End editing ***/
// Verify init is correct
float step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != step) {
printf("Mismatch in A init view! actual = %f, expected = %f\n",
A(row, col), step);
exit(-1);
}
step += 0.5;
}
}
for (int col = 0; col < V.Size(0); col++) {
if (V(col) != (-1 + col * -1)) {
printf("Mismatch in A init view! actual = %f, expected = %f\n", V(col),
(float)(-1 + col * -1));
exit(-1);
}
}
print(A);
print(V);
printf("Init verification passed!\n");
/****************************************************************************************************
* Add 5.0 to all elements of A and store the results back in A
****************************************************************************************************/
(A = A + 5.0).run();
/*** End editing ***/
hipStreamSynchronize(0);
step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != (5.0 + step)) {
printf("Mismatch in A sum view! actual = %f, expected = %f\n",
A(row, col), 5.0 + step);
exit(-1);
}
step += 0.5;
}
}
print(A);
printf("Sum verification passed!\n");
/****************************************************************************************************
* Clone V to match the dimensions of A, and subtract V from A. The results
* should be stored in A
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/quickstart.html#increasing-dimensionality
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorview.html#_CPPv4I0_iEN4matx12tensor_tE
*
****************************************************************************************************/
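// Clone<2> builds a broadcast view of V (no copy): matxKeepDim keeps V's length along the
// last dimension while the new first dimension repeats it A.Size(0) times.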
auto tvs = V.Clone<2>({A.Size(0), matxKeepDim});
(A = A - tvs).run();
/*** End editing ***/
hipStreamSynchronize(0);
step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != (5.0 + step - tvs(row, col))) {
printf("Mismatch in A sub view! actual = %f, expected = %f\n",
A(row, col), 5.0 + step - tvs(row, col));
exit(-1);
}
step += 0.5;
}
}
print(A);
print(tvs);
printf("Clone verification passed!\n");
/****************************************************************************************************
* Raise the matrix A to the power of 2 and multiply the output by two. Next,
* subtract the vector V from each row. Store the result in tensor B.
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorops.html#_CPPv4N4matx3powE2Op2Op
****************************************************************************************************/
(B = (pow(A, 2) * 2) - V).run();
/*** End editing ***/
hipStreamSynchronize(0);
for (int row = 0; row < B.Size(0); row++) {
for (int col = 0; col < B.Size(1); col++) {
if (B(row, col) != powf(A(row, col), 2) * 2 - V(col)) {
printf("Mismatch in B init view! actual = %f, expected = %f\n",
B(row, col), powf(A(row, col), 2) * 2 - V(col));
exit(-1);
}
}
}
print(B);
printf("Mixed verification passed!\n");
return 0;
}
|
d99727e17eae473d3659b7e238ac9db4d03e903d.cu
|
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include <matx.h>
using namespace matx;
int main() {
tensorShape_t<2> shape({2, 3});
tensor_t<float, 2> A(shape);
tensor_t<float, 2> B(shape);
tensor_t<float, 1> V({3});
/****************************************************************************************************
* Initialize tensor A with increasing values from 0.5 to 3.0 in steps of 0.5,
* and tensor V from -1 to -3 in steps of -1.
****************************************************************************************************/
A.SetVals({{0.5, 1, 1.5}, {2.0, 2.5, 3.0}});
V.SetVals({-1, -2, -3});
/*** End editing ***/
// Verify init is correct
float step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != step) {
printf("Mismatch in A init view! actual = %f, expected = %f\n",
A(row, col), step);
exit(-1);
}
step += 0.5;
}
}
for (int col = 0; col < V.Size(0); col++) {
if (V(col) != (-1 + col * -1)) {
printf("Mismatch in A init view! actual = %f, expected = %f\n", V(col),
(float)(-1 + col * -1));
exit(-1);
}
}
print(A);
print(V);
printf("Init verification passed!\n");
/****************************************************************************************************
* Add 5.0 to all elements of A and store the results back in A
****************************************************************************************************/
(A = A + 5.0).run();
/*** End editing ***/
cudaStreamSynchronize(0);
step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != (5.0 + step)) {
printf("Mismatch in A sum view! actual = %f, expected = %f\n",
A(row, col), 5.0 + step);
exit(-1);
}
step += 0.5;
}
}
print(A);
printf("Sum verification passed!\n");
/****************************************************************************************************
* Clone V to match the dimensions of A, and subtract V from A. The results
* should be stored in A
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/quickstart.html#increasing-dimensionality
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorview.html#_CPPv4I0_iEN4matx12tensor_tE
*
****************************************************************************************************/
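// Clone<2> builds a broadcast view of V (no copy): matxKeepDim keeps V's length along the
// last dimension while the new first dimension repeats it A.Size(0) times.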
auto tvs = V.Clone<2>({A.Size(0), matxKeepDim});
(A = A - tvs).run();
/*** End editing ***/
cudaStreamSynchronize(0);
step = 0.5;
for (int row = 0; row < A.Size(0); row++) {
for (int col = 0; col < A.Size(1); col++) {
if (A(row, col) != (5.0 + step - tvs(row, col))) {
printf("Mismatch in A sub view! actual = %f, expected = %f\n",
A(row, col), 5.0 + step - tvs(row, col));
exit(-1);
}
step += 0.5;
}
}
print(A);
print(tvs);
printf("Clone verification passed!\n");
/****************************************************************************************************
* Raise the matrix A to the power of 2 and multiply the output by two. Next,
* subtract the vector V from each row. Store the result in tensor B.
*
* https://devtech-compute.gitlab-master-pages.nvidia.com/matx/api/tensorops.html#_CPPv4N4matx3powE2Op2Op
****************************************************************************************************/
(B = (pow(A, 2) * 2) - V).run();
/*** End editing ***/
cudaStreamSynchronize(0);
for (int row = 0; row < B.Size(0); row++) {
for (int col = 0; col < B.Size(1); col++) {
if (B(row, col) != powf(A(row, col), 2) * 2 - V(col)) {
printf("Mismatch in B init view! actual = %f, expected = %f\n",
B(row, col), powf(A(row, col), 2) * 2 - V(col));
exit(-1);
}
}
}
print(B);
printf("Mixed verification passed!\n");
return 0;
}
|
6349c67c3ecf20930cbd6c001e2de699062e75db.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../../01/common/common_vc.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* Display a variety of information on the first CUDA device in this system,
* including driver version, runtime version, compute capability, bytes of
* global memory, etc.
*/
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n",
(int)error_id, hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int dev = 0, driverVersion = 0, runtimeVersion = 0;
CHECK(hipSetDevice(dev));
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
driverVersion / 1000, (driverVersion % 100) / 10,
runtimeVersion / 1000, (runtimeVersion % 100) / 10);
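// The split above assumes the CUDA encoding 1000*major + 10*minor, e.g. 12040 -> 12.4.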
printf(" CUDA Capability Major/Minor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
printf(" Total amount of global memory: %.2f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem / pow(1024.0, 3),
(unsigned long long)deviceProp.totalGlobalMem);
printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n",
deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
printf(" Memory Clock rate: %.0f Mhz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize)
{
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0],
deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
exit(EXIT_SUCCESS);
}
|
6349c67c3ecf20930cbd6c001e2de699062e75db.cu
|
#include "../../01/common/common_vc.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* Display a variety of information on the first CUDA device in this system,
* including driver version, runtime version, compute capability, bytes of
* global memory, etc.
*/
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n",
(int)error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int dev = 0, driverVersion = 0, runtimeVersion = 0;
CHECK(cudaSetDevice(dev));
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Device %d: \"%s\"\n", dev, deviceProp.name);
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
driverVersion / 1000, (driverVersion % 100) / 10,
runtimeVersion / 1000, (runtimeVersion % 100) / 10);
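// The split above assumes the CUDA encoding 1000*major + 10*minor, e.g. 12040 -> 12.4.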
printf(" CUDA Capability Major/Minor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
printf(" Total amount of global memory: %.2f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem / pow(1024.0, 3),
(unsigned long long)deviceProp.totalGlobalMem);
printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n",
deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
printf(" Memory Clock rate: %.0f Mhz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize)
{
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0],
deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1],
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
exit(EXIT_SUCCESS);
}
|
7495ca775a5b873d0b2a2c3bd508f29e408780c5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Shared memory speeds up performance when we need to access data frequently.
Here, the 1D stencil kernel adds all its neighboring data within a radius.
The C model is added to verify the stencil result on a GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define RADIUS 7
#define BLOCK_SIZE 256
__global__
void stencil_1d(const int *__restrict__ in, int *__restrict__ out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
// At both ends of a block, the sliding window extends beyond the block boundary.
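// The first RADIUS threads of each block also load the two halo regions: RADIUS cells to
// the left (zero-padded at the start of the array) and RADIUS cells just past the block's
// right edge, which the padded input allocation guarantees to be readable.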
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = (gindex < RADIUS) ? 0 : in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// Synchronize (ensure all threads have finished loading shared memory before continuing)
__syncthreads();
// Apply the 1D stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <length> <repeat>\n", argv[0]);
printf("length is a multiple of %d\n", BLOCK_SIZE);
return 1;
}
const int length = atoi(argv[1]);
const int repeat = atoi(argv[2]);
int size = length * sizeof(int);
int pad_size = (length + RADIUS) * sizeof(int);
int *a, *b;
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(pad_size);
b = (int *)malloc(size);
for (int i = 0; i < length+RADIUS; i++) a[i] = i;
int *d_a, *d_b;
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, pad_size);
hipMalloc((void **)&d_b, size);
// Copy inputs to device
hipMemcpy(d_a, a, pad_size, hipMemcpyHostToDevice);
dim3 grids (length/BLOCK_SIZE);
dim3 blocks (BLOCK_SIZE);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// Launch kernel on GPU
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( stencil_1d) , dim3(grids), dim3(blocks) , 0, 0, d_a, d_b);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
// Copy result back to host
hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost);
// verification
bool ok = true;
for (int i = 0; i < 2*RADIUS; i++) {
int s = 0;
for (int j = i; j <= i+2*RADIUS; j++)
s += j < RADIUS ? 0 : (a[j] - RADIUS);
if (s != b[i]) {
printf("Error at %d: %d (host) != %d (device)\n", i, s, b[i]);
ok = false;
break;
}
}
for (int i = 2*RADIUS; i < length; i++) {
int s = 0;
for (int j = i-RADIUS; j <= i+RADIUS; j++)
s += a[j];
if (s != b[i]) {
printf("Error at %d: %d (host) != %d (device)\n", i, s, b[i]);
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
// Cleanup
free(a);
free(b);
hipFree(d_a);
hipFree(d_b);
return 0;
}
|
7495ca775a5b873d0b2a2c3bd508f29e408780c5.cu
|
/*
Shared memory speeds up performance when we need to access data frequently.
Here, the 1D stencil kernel adds all its neighboring data within a radius.
The C model is added to verify the stencil result on a GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#define RADIUS 7
#define BLOCK_SIZE 256
__global__
void stencil_1d(const int *__restrict__ in, int *__restrict__ out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
// At both ends of a block, the sliding window extends beyond the block boundary.
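// The first RADIUS threads of each block also load the two halo regions: RADIUS cells to
// the left (zero-padded at the start of the array) and RADIUS cells just past the block's
// right edge, which the padded input allocation guarantees to be readable.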
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = (gindex < RADIUS) ? 0 : in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// Synchronize (ensure all threads have finished loading shared memory before continuing)
__syncthreads();
// Apply the 1D stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <length> <repeat>\n", argv[0]);
printf("length is a multiple of %d\n", BLOCK_SIZE);
return 1;
}
const int length = atoi(argv[1]);
const int repeat = atoi(argv[2]);
int size = length * sizeof(int);
int pad_size = (length + RADIUS) * sizeof(int);
int *a, *b;
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(pad_size);
b = (int *)malloc(size);
for (int i = 0; i < length+RADIUS; i++) a[i] = i;
int *d_a, *d_b;
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, pad_size);
cudaMalloc((void **)&d_b, size);
// Copy inputs to device
cudaMemcpy(d_a, a, pad_size, cudaMemcpyHostToDevice);
dim3 grids (length/BLOCK_SIZE);
dim3 blocks (BLOCK_SIZE);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// Launch kernel on GPU
for (int i = 0; i < repeat; i++)
stencil_1d <<< grids, blocks >>> (d_a, d_b);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (s)\n", (time * 1e-9f) / repeat);
// Copy result back to host
cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
// verification
bool ok = true;
for (int i = 0; i < 2*RADIUS; i++) {
int s = 0;
for (int j = i; j <= i+2*RADIUS; j++)
s += j < RADIUS ? 0 : (a[j] - RADIUS);
if (s != b[i]) {
printf("Error at %d: %d (host) != %d (device)\n", i, s, b[i]);
ok = false;
break;
}
}
for (int i = 2*RADIUS; i < length; i++) {
int s = 0;
for (int j = i-RADIUS; j <= i+RADIUS; j++)
s += a[j];
if (s != b[i]) {
printf("Error at %d: %d (host) != %d (device)\n", i, s, b[i]);
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
// Cleanup
free(a);
free(b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
|
534f734549ce4a4a2a5920503ea8182bfa2008e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/aggregation.hpp>
#include <rolling/rolling_detail.hpp>
#include <cudf/rolling.hpp>
#include <cudf/utilities/nvtx_utils.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/copying.hpp>
#include <jit/type.h>
#include <jit/launcher.h>
#include <jit/parser.h>
#include <rolling/jit/code/code.h>
#include <types.hpp.jit>
#include <bit.hpp.jit>
#include <rmm/device_scalar.hpp>
#include <memory>
namespace cudf {
namespace experimental {
namespace detail {
namespace { // anonymous
/**
* @brief Computes the rolling window function
*
* @tparam ColumnType Datatype of values pointed to by the pointers
* @tparam agg_op A functor that defines the aggregation operation
* @tparam is_mean Compute mean=sum/count across all valid elements in the window
* @tparam block_size CUDA block size for the kernel
* @tparam has_nulls true if the input column has nulls
* @tparam WindowIterator iterator type (inferred)
* @param input Input column device view
* @param output Output column device view
* @param preceding_window_begin[in] Rolling window size iterator, accumulates from
* in_col[i-preceding_window] to in_col[i] inclusive
* @param following_window_begin[in] Rolling window size iterator in the forward
* direction, accumulates from in_col[i] to
* in_col[i+following_window] inclusive
* @param min_periods[in] Minimum number of observations in window required to
* have a value, otherwise 0 is stored in the valid bit mask
*/
template <typename T, typename agg_op, aggregation::Kind op, int block_size, bool has_nulls,
typename WindowIterator>
__launch_bounds__(block_size)
__global__
void gpu_rolling(column_device_view input,
mutable_column_device_view output,
size_type * __restrict__ output_valid_count,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods)
{
size_type i = blockIdx.x * block_size + threadIdx.x;
size_type stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, i < input.size());
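// Grid-stride loop: each thread handles rows i, i+stride, i+2*stride, ...; active_threads
// restricts the ballot to lanes that still have a row, so the validity word built below
// only reflects live lanes of the warp.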
while(i < input.size())
{
T val = agg_op::template identity<T>();
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
size_type preceding_window = preceding_window_begin[i];
size_type following_window = following_window_begin[i];
// compute bounds
size_type start = max(0, i - preceding_window);
size_type end = min(input.size(), i + following_window + 1);
size_type start_index = min(start, end);
size_type end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
// Element type and output type are different for COUNT
T element = (op == aggregation::COUNT) ? T{0} : input.element<T>(j);
val = agg_op{}(element, val);
count++;
}
}
// check if we have enough input samples
bool output_is_valid = (count >= min_periods);
// set the mask
cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)};
// only one thread writes the mask
if (0 == threadIdx.x % cudf::experimental::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// store the output value, one per thread
if (output_is_valid)
cudf::detail::store_output_functor<T, op == aggregation::MEAN>{}(output.element<T>(i),
val, count);
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::experimental::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if(threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
template <typename InputType>
struct rolling_window_launcher
{
template <typename T, typename agg_op, aggregation::Kind op, typename WindowIterator>
std::enable_if_t<cudf::detail::is_supported<T, agg_op,
op, op == aggregation::MEAN>(), std::unique_ptr<column>>
launch(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
hipStream_t stream) {
if (input.is_empty()) return empty_like(input);
cudf::nvtx::range_push("CUDF_ROLLING_WINDOW", cudf::nvtx::color::ORANGE);
min_periods = ::max(min_periods, 1);
// output is always nullable, COUNT always INT32 output
std::unique_ptr<column> output = (op == aggregation::COUNT) ?
make_numeric_column(cudf::data_type{cudf::INT32}, input.size(),
cudf::UNINITIALIZED, stream, mr) :
cudf::experimental::detail::allocate_like(input, input.size(),
cudf::experimental::mask_allocation_policy::ALWAYS, mr, stream);
constexpr cudf::size_type block_size = 256;
cudf::experimental::detail::grid_1d grid(input.size(), block_size);
auto input_device_view = column_device_view::create(input, stream);
auto output_device_view = mutable_column_device_view::create(*output, stream);
rmm::device_scalar<size_type> device_valid_count{0, stream};
if (input.has_nulls()) {
if (op == aggregation::COUNT) {
hipLaunchKernelGGL(( gpu_rolling<size_type, agg_op, op, block_size, true>), dim3(grid.num_blocks), dim3(block_size), 0, stream,
*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
else {
hipLaunchKernelGGL(( gpu_rolling<InputType, agg_op, op, block_size, true>), dim3(grid.num_blocks), dim3(block_size), 0, stream,
*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
} else {
if (op == aggregation::COUNT) {
hipLaunchKernelGGL(( gpu_rolling<size_type, agg_op, op, block_size, false>), dim3(grid.num_blocks), dim3(block_size), 0, stream,
*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
else {
hipLaunchKernelGGL(( gpu_rolling<InputType, agg_op, op, block_size, false>), dim3(grid.num_blocks), dim3(block_size), 0, stream,
*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
}
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CHECK_CUDA(stream);
cudf::nvtx::range_pop();
return std::move(output);
}
template <typename T, typename agg_op, aggregation::Kind op, typename WindowIterator>
std::enable_if_t<!cudf::detail::is_supported<T, agg_op,
op, op == aggregation::MEAN>(), std::unique_ptr<column>>
launch (column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
hipStream_t stream) {
CUDF_FAIL("Aggregation operator and/or input type combination is invalid");
}
template<aggregation::Kind op, typename WindowIterator>
std::enable_if_t<!(op == aggregation::MEAN), std::unique_ptr<column>>
operator()(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
hipStream_t stream)
{
return launch <InputType, typename corresponding_operator<op>::type, op, WindowIterator> (
input,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
mr,
stream);
}
template<aggregation::Kind op, typename WindowIterator>
std::enable_if_t<(op == aggregation::MEAN), std::unique_ptr<column>>
operator()(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
hipStream_t stream) {
return launch <InputType, cudf::DeviceSum, op, WindowIterator> (
input,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
mr,
stream);
}
};
struct dispatch_rolling {
template <typename T, typename WindowIterator>
std::unique_ptr<column> operator()(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
hipStream_t stream) {
return aggregation_dispatcher(agg->kind, rolling_window_launcher<T>{},
input,
preceding_window_begin, following_window_begin,
min_periods, agg, mr, stream);
}
};
} // namespace anonymous
// Applies a user-defined rolling window function to the values in a column.
template <bool static_window, typename WindowIterator>
std::unique_ptr<column> rolling_window_udf(column_view const &input,
WindowIterator preceding_window,
WindowIterator following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
if (input.has_nulls())
CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls.");
cudf::nvtx::range_push("CUDF_ROLLING_WINDOW", cudf::nvtx::color::ORANGE);
min_periods = ::max(min_periods, 1);
auto udf_agg = static_cast<udf_aggregation*>(agg.get());
std::string hash = "prog_experimental_rolling."
+ std::to_string(std::hash<std::string>{}(udf_agg->_source));
std::string cuda_source;
switch(udf_agg->kind){
case aggregation::Kind::PTX:
cuda_source = cudf::experimental::rolling::jit::code::kernel_headers;
cuda_source += cudf::jit::parse_single_function_ptx(udf_agg->_source, udf_agg->_function_name,
cudf::jit::get_type_name(udf_agg->_output_type),
{0, 5}); // args 0 and 5 are pointers.
cuda_source += cudf::experimental::rolling::jit::code::kernel;
break;
case aggregation::Kind::CUDA:
cuda_source = cudf::experimental::rolling::jit::code::kernel_headers;
cuda_source += cudf::jit::parse_single_function_cuda(udf_agg->_source, udf_agg->_function_name);
cuda_source += cudf::experimental::rolling::jit::code::kernel;
break;
default:
CUDF_FAIL("Unsupported UDF type.");
}
std::unique_ptr<column> output = make_numeric_column(udf_agg->_output_type, input.size(),
cudf::UNINITIALIZED, stream, mr);
auto output_view = output->mutable_view();
rmm::device_scalar<size_type> device_valid_count{0, stream};
// Launch the jitify kernel
cudf::jit::launcher(hash, cuda_source,
{ cudf_types_hpp, cudf_utilities_bit_hpp,
cudf::experimental::rolling::jit::code::operation_h },
{ "-std=c++14", "-w" }, nullptr, stream)
.set_kernel_inst("gpu_rolling_new", // name of the kernel we are launching
{ cudf::jit::get_type_name(input.type()), // list of template arguments
cudf::jit::get_type_name(output->type()),
udf_agg->_operator_name,
static_window ? "cudf::size_type" : "cudf::size_type*"})
.launch(input.size(), cudf::jit::get_data_ptr(input), input.null_mask(),
cudf::jit::get_data_ptr(output_view), output_view.null_mask(),
device_valid_count.data(), preceding_window, following_window, min_periods);
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CHECK_CUDA(stream);
cudf::nvtx::range_pop();
return output;
}
// Applies a rolling window function to the values in a column.
template <typename WindowIterator>
std::unique_ptr<column> rolling_window(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
return cudf::experimental::type_dispatcher(input.type(),
dispatch_rolling{},
input,
preceding_window_begin,
following_window_begin,
min_periods, agg, mr, stream);
}
} // namespace detail
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::experimental::detail::rolling_window_udf<true>(input,
preceding_window,
following_window,
min_periods, agg, mr, 0);
} else {
auto preceding_window_begin = thrust::make_constant_iterator(preceding_window);
auto following_window_begin = thrust::make_constant_iterator(following_window);
return cudf::experimental::detail::rolling_window(input,
preceding_window_begin,
following_window_begin,
min_periods, agg, mr, 0);
}
}
// Applies a variable-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
if (preceding_window.size() == 0 || following_window.size() == 0) return empty_like(input);
CUDF_EXPECTS(preceding_window.type().id() == INT32 && following_window.type().id() == INT32,
"preceding_window/following_window must have INT32 type");
CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(),
"preceding_window/following_window size must match input size");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::experimental::detail::rolling_window_udf<false>(input,
preceding_window.begin<size_type>(),
following_window.begin<size_type>(),
min_periods, agg, mr, 0);
} else {
return cudf::experimental::detail::rolling_window(input,
preceding_window.begin<size_type>(),
following_window.begin<size_type>(),
min_periods, agg, mr, 0);
}
}
} // namespace experimental
} // namespace cudf
|
534f734549ce4a4a2a5920503ea8182bfa2008e5.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/aggregation.hpp>
#include <rolling/rolling_detail.hpp>
#include <cudf/rolling.hpp>
#include <cudf/utilities/nvtx_utils.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/copying.hpp>
#include <jit/type.h>
#include <jit/launcher.h>
#include <jit/parser.h>
#include <rolling/jit/code/code.h>
#include <types.hpp.jit>
#include <bit.hpp.jit>
#include <rmm/device_scalar.hpp>
#include <memory>
namespace cudf {
namespace experimental {
namespace detail {
namespace { // anonymous
/**
* @brief Computes the rolling window function
*
* @tparam ColumnType Datatype of values pointed to by the pointers
* @tparam agg_op A functor that defines the aggregation operation
* @tparam is_mean Compute mean=sum/count across all valid elements in the window
* @tparam block_size CUDA block size for the kernel
* @tparam has_nulls true if the input column has nulls
* @tparam WindowIterator iterator type (inferred)
* @param input Input column device view
* @param output Output column device view
* @param preceding_window_begin[in] Rolling window size iterator, accumulates from
* in_col[i-preceding_window] to in_col[i] inclusive
* @param following_window_begin[in] Rolling window size iterator in the forward
* direction, accumulates from in_col[i] to
* in_col[i+following_window] inclusive
* @param min_periods[in] Minimum number of observations in window required to
* have a value, otherwise 0 is stored in the valid bit mask
*/
template <typename T, typename agg_op, aggregation::Kind op, int block_size, bool has_nulls,
typename WindowIterator>
__launch_bounds__(block_size)
__global__
void gpu_rolling(column_device_view input,
mutable_column_device_view output,
size_type * __restrict__ output_valid_count,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods)
{
size_type i = blockIdx.x * block_size + threadIdx.x;
size_type stride = block_size * gridDim.x;
size_type warp_valid_count{0};
auto active_threads = __ballot_sync(0xffffffff, i < input.size());
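// Grid-stride loop: each thread handles rows i, i+stride, i+2*stride, ...; active_threads
// restricts the ballot to lanes that still have a row, so the validity word built below
// only reflects live lanes of the warp.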
while(i < input.size())
{
T val = agg_op::template identity<T>();
// declare this as volatile to avoid some compiler optimizations that lead to incorrect results
// for CUDA 10.0 and below (fixed in CUDA 10.1)
volatile cudf::size_type count = 0;
size_type preceding_window = preceding_window_begin[i];
size_type following_window = following_window_begin[i];
// compute bounds
size_type start = max(0, i - preceding_window);
size_type end = min(input.size(), i + following_window + 1);
size_type start_index = min(start, end);
size_type end_index = max(start, end);
// aggregate
// TODO: We should explore using shared memory to avoid redundant loads.
// This might require separating the kernel into a special version
// for dynamic and static sizes.
for (size_type j = start_index; j < end_index; j++) {
if (!has_nulls || input.is_valid(j)) {
// Element type and output type are different for COUNT
T element = (op == aggregation::COUNT) ? T{0} : input.element<T>(j);
val = agg_op{}(element, val);
count++;
}
}
// check if we have enough input samples
bool output_is_valid = (count >= min_periods);
// set the mask
cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)};
// only one thread writes the mask
if (0 == threadIdx.x % cudf::experimental::detail::warp_size) {
output.set_mask_word(cudf::word_index(i), result_mask);
warp_valid_count += __popc(result_mask);
}
// store the output value, one per thread
if (output_is_valid)
cudf::detail::store_output_functor<T, op == aggregation::MEAN>{}(output.element<T>(i),
val, count);
// process next element
i += stride;
active_threads = __ballot_sync(active_threads, i < input.size());
}
// sum the valid counts across the whole block
size_type block_valid_count =
cudf::experimental::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if(threadIdx.x == 0) {
atomicAdd(output_valid_count, block_valid_count);
}
}
template <typename InputType>
struct rolling_window_launcher
{
template <typename T, typename agg_op, aggregation::Kind op, typename WindowIterator>
std::enable_if_t<cudf::detail::is_supported<T, agg_op,
op, op == aggregation::MEAN>(), std::unique_ptr<column>>
launch(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream) {
if (input.is_empty()) return empty_like(input);
cudf::nvtx::range_push("CUDF_ROLLING_WINDOW", cudf::nvtx::color::ORANGE);
min_periods = std::max(min_periods, 1);
// output is always nullable, COUNT always INT32 output
std::unique_ptr<column> output = (op == aggregation::COUNT) ?
make_numeric_column(cudf::data_type{cudf::INT32}, input.size(),
cudf::UNINITIALIZED, stream, mr) :
cudf::experimental::detail::allocate_like(input, input.size(),
cudf::experimental::mask_allocation_policy::ALWAYS, mr, stream);
constexpr cudf::size_type block_size = 256;
cudf::experimental::detail::grid_1d grid(input.size(), block_size);
auto input_device_view = column_device_view::create(input, stream);
auto output_device_view = mutable_column_device_view::create(*output, stream);
rmm::device_scalar<size_type> device_valid_count{0, stream};
if (input.has_nulls()) {
if (op == aggregation::COUNT) {
gpu_rolling<size_type, agg_op, op, block_size, true><<<grid.num_blocks, block_size, 0, stream>>>
(*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
else {
gpu_rolling<InputType, agg_op, op, block_size, true><<<grid.num_blocks, block_size, 0, stream>>>
(*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
} else {
if (op == aggregation::COUNT) {
gpu_rolling<size_type, agg_op, op, block_size, false><<<grid.num_blocks, block_size, 0, stream>>>
(*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
else {
gpu_rolling<InputType, agg_op, op, block_size, false><<<grid.num_blocks, block_size, 0, stream>>>
(*input_device_view, *output_device_view, device_valid_count.data(),
preceding_window_begin, following_window_begin, min_periods);
}
}
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CHECK_CUDA(stream);
cudf::nvtx::range_pop();
return std::move(output);
}
template <typename T, typename agg_op, aggregation::Kind op, typename WindowIterator>
std::enable_if_t<!cudf::detail::is_supported<T, agg_op,
op, op == aggregation::MEAN>(), std::unique_ptr<column>>
launch (column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream) {
CUDF_FAIL("Aggregation operator and/or input type combination is invalid");
}
template<aggregation::Kind op, typename WindowIterator>
std::enable_if_t<!(op == aggregation::MEAN), std::unique_ptr<column>>
operator()(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream)
{
return launch <InputType, typename corresponding_operator<op>::type, op, WindowIterator> (
input,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
mr,
stream);
}
template<aggregation::Kind op, typename WindowIterator>
std::enable_if_t<(op == aggregation::MEAN), std::unique_ptr<column>>
operator()(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream) {
return launch <InputType, cudf::DeviceSum, op, WindowIterator> (
input,
preceding_window_begin,
following_window_begin,
min_periods,
agg,
mr,
stream);
}
};
struct dispatch_rolling {
template <typename T, typename WindowIterator>
std::unique_ptr<column> operator()(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream) {
return aggregation_dispatcher(agg->kind, rolling_window_launcher<T>{},
input,
preceding_window_begin, following_window_begin,
min_periods, agg, mr, stream);
}
};
} // namespace anonymous
// Applies a user-defined rolling window function to the values in a column.
template <bool static_window, typename WindowIterator>
std::unique_ptr<column> rolling_window_udf(column_view const &input,
WindowIterator preceding_window,
WindowIterator following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
if (input.has_nulls())
CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls.");
cudf::nvtx::range_push("CUDF_ROLLING_WINDOW", cudf::nvtx::color::ORANGE);
min_periods = std::max(min_periods, 1);
auto udf_agg = static_cast<udf_aggregation*>(agg.get());
std::string hash = "prog_experimental_rolling."
+ std::to_string(std::hash<std::string>{}(udf_agg->_source));
std::string cuda_source;
switch(udf_agg->kind){
case aggregation::Kind::PTX:
cuda_source = cudf::experimental::rolling::jit::code::kernel_headers;
cuda_source += cudf::jit::parse_single_function_ptx(udf_agg->_source, udf_agg->_function_name,
cudf::jit::get_type_name(udf_agg->_output_type),
{0, 5}); // args 0 and 5 are pointers.
cuda_source += cudf::experimental::rolling::jit::code::kernel;
break;
case aggregation::Kind::CUDA:
cuda_source = cudf::experimental::rolling::jit::code::kernel_headers;
cuda_source += cudf::jit::parse_single_function_cuda(udf_agg->_source, udf_agg->_function_name);
cuda_source += cudf::experimental::rolling::jit::code::kernel;
break;
default:
CUDF_FAIL("Unsupported UDF type.");
}
std::unique_ptr<column> output = make_numeric_column(udf_agg->_output_type, input.size(),
cudf::UNINITIALIZED, stream, mr);
auto output_view = output->mutable_view();
rmm::device_scalar<size_type> device_valid_count{0, stream};
// Launch the jitify kernel
cudf::jit::launcher(hash, cuda_source,
{ cudf_types_hpp, cudf_utilities_bit_hpp,
cudf::experimental::rolling::jit::code::operation_h },
{ "-std=c++14", "-w" }, nullptr, stream)
.set_kernel_inst("gpu_rolling_new", // name of the kernel we are launching
{ cudf::jit::get_type_name(input.type()), // list of template arguments
cudf::jit::get_type_name(output->type()),
udf_agg->_operator_name,
static_window ? "cudf::size_type" : "cudf::size_type*"})
.launch(input.size(), cudf::jit::get_data_ptr(input), input.null_mask(),
cudf::jit::get_data_ptr(output_view), output_view.null_mask(),
device_valid_count.data(), preceding_window, following_window, min_periods);
output->set_null_count(output->size() - device_valid_count.value(stream));
// check the stream for debugging
CHECK_CUDA(stream);
cudf::nvtx::range_pop();
return output;
}
// Applies a rolling window function to the values in a column.
template <typename WindowIterator>
std::unique_ptr<column> rolling_window(column_view const& input,
WindowIterator preceding_window_begin,
WindowIterator following_window_begin,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(),
"bitmask_type size does not match CUDA warp size");
return cudf::experimental::type_dispatcher(input.type(),
dispatch_rolling{},
input,
preceding_window_begin,
following_window_begin,
min_periods, agg, mr, stream);
}
} // namespace detail
// Applies a fixed-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
size_type preceding_window,
size_type following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::experimental::detail::rolling_window_udf<true>(input,
preceding_window,
following_window,
min_periods, agg, mr, 0);
} else {
auto preceding_window_begin = thrust::make_constant_iterator(preceding_window);
auto following_window_begin = thrust::make_constant_iterator(following_window);
return cudf::experimental::detail::rolling_window(input,
preceding_window_begin,
following_window_begin,
min_periods, agg, mr, 0);
}
}
// Applies a variable-size rolling window function to the values in a column.
std::unique_ptr<column> rolling_window(column_view const& input,
column_view const& preceding_window,
column_view const& following_window,
size_type min_periods,
std::unique_ptr<aggregation> const& agg,
rmm::mr::device_memory_resource* mr)
{
if (preceding_window.size() == 0 || following_window.size() == 0) return empty_like(input);
CUDF_EXPECTS(preceding_window.type().id() == INT32 && following_window.type().id() == INT32,
"preceding_window/following_window must have INT32 type");
CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(),
"preceding_window/following_window size must match input size");
if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) {
return cudf::experimental::detail::rolling_window_udf<false>(input,
preceding_window.begin<size_type>(),
following_window.begin<size_type>(),
min_periods, agg, mr, 0);
} else {
return cudf::experimental::detail::rolling_window(input,
preceding_window.begin<size_type>(),
following_window.begin<size_type>(),
min_periods, agg, mr, 0);
}
}
} // namespace experimental
} // namespace cudf
|
bae672e442415d7dad9b16e465f60d2aa28f36a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#define __cplusplus
#include "hip/hip_fp16.h"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
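// Converts the layer's FP32 weights and biases to FP16 on the device and stores the half
// buffers back through the float* members, so downstream code must reinterpret them as half*.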
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(hipMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
/*check_error(hipMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), hipMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));*/
}
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
	if (__hlt(b, half(-1.0f))) b = half(-1.0f);
	if (__hgt(b, half(1.0f))) b = half(1.0f);
	output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct)
{
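	// Fused bias add + activation in half precision; activations without a half kernel here
	// get a plain bias add and are applied later in FP32 by the caller.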
	if (bUnSupportAct) { add_bias_half_gpu(output, biases, batch, n, size); return; }
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(hipPeekAtLastError());
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
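	// bIn32/bOut32 flag whether the adjacent layers exchange FP32 tensors; if so, route the data
	// through the shared half staging buffers and convert on the way in and out.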
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate);
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
}
|
bae672e442415d7dad9b16e465f60d2aa28f36a5.cu
|
#include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#define __cplusplus
#include "cuda_fp16.h"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer* l)
{
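	// Convert this layer's weights and biases from FP32 to FP16 in place: allocate half buffers,
	// convert on the GPU, free the FP32 copies, and store the half pointers back through the
	// existing float* fields (later users must treat them as half*).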
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
	OutPutGPUMemory(l->weights_gpu, l->nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
	check_error(cudaMalloc((void**)&fResult, l->nweights * sizeof(float)));
	cuda_convert_f16_to_f32(halfWeights, l->nweights, fResult);
	OutPutGPUMemory(fResult, l->nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
/*check_error(cudaMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), cudaMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));*/
}
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
	if (__hlt(b, half(-1.0f))) b = half(-1.0f);
	if (__hgt(b, half(1.0f))) b = half(1.0f);
	output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct)
{
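	// Fused bias add + activation in half precision; activations without a half kernel here
	// get a plain bias add and are applied later in FP32 by the caller.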
	if (bUnSupportAct) { add_bias_half_gpu(output, biases, batch, n, size); return; }
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(cudaPeekAtLastError());
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
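	// bIn32/bOut32 flag whether the adjacent layers exchange FP32 tensors; if so, route the data
	// through the shared half staging buffers and convert on the way in and out.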
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate);
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
}
|
d8a4869dffe0ca212c5e82895dfb7b8e436abba5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 2 of 2: implement the fast kernel using shared memory
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
// Load one element per thread from device memory and store it
// *in reversed order* into temporary shared memory
    int inOffset = blockDim.x * blockIdx.x;
    int in = inOffset + threadIdx.x;
    s_data[blockDim.x - 1 - threadIdx.x] = d_in[in];
    // Block until all threads in the block have written their data to shared mem
    __syncthreads();
    // write the data from shared memory in forward order,
    // but to the reversed block offset as before
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int out = outOffset + threadIdx.x;
    d_out[out] = s_data[threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
int dimA = 256 * 1024; // 256K elements (1MB total)
// pointer for device memory
int *d_b, *d_a;
// define grid and block size
int numThreadsPerBlock = 256;
// Compute number of blocks needed based on array size and desired block size
int numBlocks = dimA / numThreadsPerBlock;
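    // dimA is an exact multiple of numThreadsPerBlock, so every block is full and no
    // remainder handling is needed in the kernel.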
// Part 1 of 2: Compute the number of bytes of shared memory needed
// This is used in the kernel invocation below
    int sharedMemSize = numThreadsPerBlock * sizeof(int); // one int per thread in the block
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
hipMalloc( (void **) &d_a, memSize );
hipMalloc( (void **) &d_b, memSize );
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
}
// Copy host array to device array
hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice );
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock), sharedMemSize , 0, d_b, d_a );
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// device to host copy
hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
// verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
assert(h_a[i] == dimA - 1 - i );
}
// free device memory
hipFree(d_a);
hipFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
d8a4869dffe0ca212c5e82895dfb7b8e436abba5.cu
|
/*
* Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// includes, system
#include <stdio.h>
#include <assert.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
// Part 2 of 2: implement the fast kernel using shared memory
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
// Load one element per thread from device memory and store it
// *in reversed order* into temporary shared memory
    int inOffset = blockDim.x * blockIdx.x;
    int in = inOffset + threadIdx.x;
    s_data[blockDim.x - 1 - threadIdx.x] = d_in[in];
    // Block until all threads in the block have written their data to shared mem
    __syncthreads();
    // write the data from shared memory in forward order,
    // but to the reversed block offset as before
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int out = outOffset + threadIdx.x;
    d_out[out] = s_data[threadIdx.x];
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory and size
int *h_a;
int dimA = 256 * 1024; // 256K elements (1MB total)
// pointer for device memory
int *d_b, *d_a;
// define grid and block size
int numThreadsPerBlock = 256;
// Compute number of blocks needed based on array size and desired block size
int numBlocks = dimA / numThreadsPerBlock;
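    // dimA is an exact multiple of numThreadsPerBlock, so every block is full and no
    // remainder handling is needed in the kernel.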
// Part 1 of 2: Compute the number of bytes of shared memory needed
// This is used in the kernel invocation below
    int sharedMemSize = numThreadsPerBlock * sizeof(int); // one int per thread in the block
// allocate host and device memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
h_a = (int *) malloc(memSize);
cudaMalloc( (void **) &d_a, memSize );
cudaMalloc( (void **) &d_b, memSize );
// Initialize input array on host
for (int i = 0; i < dimA; ++i)
{
h_a[i] = i;
}
// Copy host array to device array
cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );
// launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
reverseArrayBlock<<< dimGrid, dimBlock, sharedMemSize >>>( d_b, d_a );
// block until the device has completed
cudaThreadSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
// device to host copy
cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("memcpy");
// verify the data returned to the host is correct
for (int i = 0; i < dimA; i++)
{
assert(h_a[i] == dimA - 1 - i );
}
// free device memory
cudaFree(d_a);
cudaFree(d_b);
// free host memory
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
4594a255fbe034e19917265a3bea548fc6a84316.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <time.h>
#include <stdio.h>
#include <cstdlib>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
__global__ void prime_number(int * n, int * p)
{
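	// One thread per candidate i: trial division by 2..i-1 marks p[i] = 1 if i is prime,
	// 0 otherwise (0 and 1 are treated as non-prime).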
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j=0;
int prime=1;
if (i < *n)
{
for ( j = 2; j < i ; j++ )
{
if ( ( i % j ) == 0 )
{
prime = 0;
break;
}
}
if(i==0||i==1)
prime=0;
p[i]=prime;
}
}
__global__ void reduce( int *a, int *b,int * d_A) {
	__shared__ int cache[256]; // threads per block
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
int N=*d_A;
while (tid < N) {
temp += a[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2 // because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0) b[blockIdx.x] = cache[0];
}
int main(int argc, char *argv[])
{
	//Show machine specifications
clock_t start_cpu, end_cpu;
double cpu_time_used;
if (argc != 2){
printf("1: tama argc %i", argc);
}
else{
start_cpu = clock();
/*
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}*/
		// Source (host) data
int n=atoi(argv[1]);
int *h_A = &n;
size_t sizeTama = n * sizeof(int);
int *h_prime= (int*) malloc(sizeTama);
int *h_result= (int*) malloc(sizeTama);
int *h_B= (int*) malloc(sizeTama);
		// Destination (device) data
int *d_A = NULL;
hipMalloc((void **)&d_A, sizeof(int));
int *d_B = NULL;
hipMalloc((void **)&d_B,sizeTama);
int *d_prime=NULL;
hipMalloc((void **)&d_prime,sizeTama);
int *d_result=NULL;
hipMalloc((void **)&d_result,sizeTama);
//printf("NNA %i\n",*h_A);
//printf("Copy input data from the host memory to the CUDA device\n");
// Launch the Vector Add CUDA Kernel
float elapsed = 0, elapsed2=0;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipMemcpy(d_A, h_A, sizeof(int), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
prime_number << <blocksPerGrid, threadsPerBlock >> >(d_A,d_prime);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipMemcpy(h_prime, d_prime, sizeTama, hipMemcpyDeviceToHost);
hipMemcpy(d_prime, h_prime, sizeTama, hipMemcpyHostToDevice);
hipMemcpy(d_A, h_A, sizeof(int), hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
reduce <<<blocksPerGrid, threadsPerBlock >> >(d_prime,d_B,d_A);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed2, start, stop);
hipMemcpy(h_B, d_B, sizeTama, hipMemcpyDeviceToHost);
hipEventDestroy(start);
hipEventDestroy(stop);
int total_primes=0;
for (int i=0;i<blocksPerGrid;i++){
total_primes +=h_B[i];
}
end_cpu = clock();
cpu_time_used = ((double)(end_cpu - start_cpu)/(CLOCKS_PER_SEC));
printf("%i\t%f \n",n, cpu_time_used + (elapsed*0.001+elapsed2*0.001));
		// Free device global memory
		hipFree(d_A);
		hipFree(d_B);
		hipFree(d_prime);
		hipFree(d_result);
		// Free host memory
		//free(h_A);
		free(h_prime);
		free(h_B);
		free(h_result);
hipDeviceReset();
}
return 0;
}
|
4594a255fbe034e19917265a3bea548fc6a84316.cu
|
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <time.h>
#include <stdio.h>
#include <cstdlib>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
__global__ void prime_number(int * n, int * p)
{
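	// One thread per candidate i: trial division by 2..i-1 marks p[i] = 1 if i is prime,
	// 0 otherwise (0 and 1 are treated as non-prime).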
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j=0;
int prime=1;
if (i < *n)
{
for ( j = 2; j < i ; j++ )
{
if ( ( i % j ) == 0 )
{
prime = 0;
break;
}
}
if(i==0||i==1)
prime=0;
p[i]=prime;
}
}
__global__ void reduce( int *a, int *b,int * d_A) {
	__shared__ int cache[256]; // threads per block
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
int N=*d_A;
while (tid < N) {
temp += a[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2 // because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0) b[blockIdx.x] = cache[0];
}
int main(int argc, char *argv[])
{
	//Show machine specifications
clock_t start_cpu, end_cpu;
double cpu_time_used;
if (argc != 2){
printf("1: tama argc %i", argc);
}
else{
start_cpu = clock();
/*
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}*/
		// Source (host) data
int n=atoi(argv[1]);
int *h_A = &n;
size_t sizeTama = n * sizeof(int);
int *h_prime= (int*) malloc(sizeTama);
int *h_result= (int*) malloc(sizeTama);
int *h_B= (int*) malloc(sizeTama);
		// Destination (device) data
int *d_A = NULL;
cudaMalloc((void **)&d_A, sizeof(int));
int *d_B = NULL;
cudaMalloc((void **)&d_B,sizeTama);
int *d_prime=NULL;
cudaMalloc((void **)&d_prime,sizeTama);
int *d_result=NULL;
cudaMalloc((void **)&d_result,sizeTama);
//printf("NNA %i\n",*h_A);
//printf("Copy input data from the host memory to the CUDA device\n");
// Launch the Vector Add CUDA Kernel
float elapsed = 0, elapsed2=0;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
cudaMemcpy(d_A, h_A, sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
prime_number << <blocksPerGrid, threadsPerBlock >> >(d_A,d_prime);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaMemcpy(h_prime, d_prime, sizeTama, cudaMemcpyDeviceToHost);
cudaMemcpy(d_prime, h_prime, sizeTama, cudaMemcpyHostToDevice);
cudaMemcpy(d_A, h_A, sizeof(int), cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
reduce <<<blocksPerGrid, threadsPerBlock >> >(d_prime,d_B,d_A);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed2, start, stop);
cudaMemcpy(h_B, d_B, sizeTama, cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
int total_primes=0;
for (int i=0;i<blocksPerGrid;i++){
total_primes +=h_B[i];
}
end_cpu = clock();
cpu_time_used = ((double)(end_cpu - start_cpu)/(CLOCKS_PER_SEC));
printf("%i\t%f \n",n, cpu_time_used + (elapsed*0.001+elapsed2*0.001));
		// Free device global memory
		cudaFree(d_A);
		cudaFree(d_B);
		cudaFree(d_prime);
		cudaFree(d_result);
		// Free host memory
		//free(h_A);
		free(h_prime);
		free(h_B);
		free(h_result);
cudaDeviceReset();
}
return 0;
}
|
011c723bf3dc75390d82cc4308aa3f1014286116.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "extra/RenyiDoubleSum.hpp"
#include "Spins.h"
#include "types.h"
#include "utils.kernel"
namespace rbm_on_gpu {
namespace kernel {
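// Tiled evaluation of sum_{x,y} (-1/2)^popcount(x XOR y) * rho[x] * rho[y]: each TILE_SIZE
// slice of rho_diag is staged in shared memory as a row and a column tile, per-thread partial
// sums are accumulated, and on the GPU they are tree-reduced per block into one atomic add.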
template<int TILE_SIZE>
HDINLINE void renyi_double_sum(double* result, const double* rho_diag, unsigned int N) {
#include "cuda_kernel_defines.h"
SHARED double row[TILE_SIZE]; // using a register is not faster
SHARED double col[TILE_SIZE];
auto thread_result = 0.0;
#ifdef __CUDA_ARCH__
const auto x_offset = blockIdx.x * TILE_SIZE;
#else
for(auto x_offset = 0u; x_offset < N; x_offset += TILE_SIZE)
#endif
{
MULTI(i, TILE_SIZE) {
row[i] = rho_diag[x_offset + i];
}
for(auto y_offset = 0u; y_offset < N; y_offset += TILE_SIZE) {
MULTI(i, TILE_SIZE) {
col[i] = rho_diag[y_offset + i];
}
SYNC;
MULTI(m, TILE_SIZE) {
for(auto n = 0u; n < TILE_SIZE; n++) {
const auto hamming_distance = bit_count(
(x_offset + m) ^ (y_offset + n)
);
const auto hamming_sign = (hamming_distance & 1u) ? -1.0 : 1.0;
const auto hamming_weight = 1.0 / double(1u << hamming_distance);
thread_result += hamming_sign * hamming_weight * row[m] * col[n];
}
}
SYNC;
}
}
#ifdef __CUDA_ARCH__
SHARED double block_result;
tree_sum(block_result, TILE_SIZE, thread_result);
SINGLE {
generic_atomicAdd(result, block_result);
}
#else
*result = thread_result;
#endif
}
} // namespace kernel
double renyi_double_sum(const Array<double>& rho_diag) {
const bool gpu = rho_diag.gpu;
const auto N = rho_diag.size();
Array<double> result(1, gpu);
result.clear();
auto result_ptr = result.data();
auto rho_diag_ptr = rho_diag.data();
if(gpu) {
if(N >= 256) {
constexpr auto TILE_SIZE = 256u;
hipLaunchKernelGGL(( cuda_kernel), dim3(N / TILE_SIZE), dim3(TILE_SIZE), 0, 0,
[=] __device__ () {
kernel::renyi_double_sum<TILE_SIZE>(result_ptr, rho_diag_ptr, N);
}
);
}
else if(N >= 32) {
constexpr auto TILE_SIZE = 32u;
hipLaunchKernelGGL(( cuda_kernel), dim3(N / TILE_SIZE), dim3(TILE_SIZE), 0, 0,
[=] __device__ () {
kernel::renyi_double_sum<TILE_SIZE>(result_ptr, rho_diag_ptr, N);
}
);
}
else {
constexpr auto TILE_SIZE = 4u;
hipLaunchKernelGGL(( cuda_kernel), dim3(N / TILE_SIZE), dim3(TILE_SIZE), 0, 0,
[=] __device__ () {
kernel::renyi_double_sum<TILE_SIZE>(result_ptr, rho_diag_ptr, N);
}
);
}
}
else {
if(N >= 32u) {
kernel::renyi_double_sum<32u>(result.data(), rho_diag.data(), N);
}
else {
kernel::renyi_double_sum<4u>(result.data(), rho_diag.data(), N);
}
}
result.update_host();
return result.front();
}
} // namespace rbm_on_gpu
|
011c723bf3dc75390d82cc4308aa3f1014286116.cu
|
#include "extra/RenyiDoubleSum.hpp"
#include "Spins.h"
#include "types.h"
#include "utils.kernel"
namespace rbm_on_gpu {
namespace kernel {
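// Tiled evaluation of sum_{x,y} (-1/2)^popcount(x XOR y) * rho[x] * rho[y]: each TILE_SIZE
// slice of rho_diag is staged in shared memory as a row and a column tile, per-thread partial
// sums are accumulated, and on the GPU they are tree-reduced per block into one atomic add.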
template<int TILE_SIZE>
HDINLINE void renyi_double_sum(double* result, const double* rho_diag, unsigned int N) {
#include "cuda_kernel_defines.h"
SHARED double row[TILE_SIZE]; // using a register is not faster
SHARED double col[TILE_SIZE];
auto thread_result = 0.0;
#ifdef __CUDA_ARCH__
const auto x_offset = blockIdx.x * TILE_SIZE;
#else
for(auto x_offset = 0u; x_offset < N; x_offset += TILE_SIZE)
#endif
{
MULTI(i, TILE_SIZE) {
row[i] = rho_diag[x_offset + i];
}
for(auto y_offset = 0u; y_offset < N; y_offset += TILE_SIZE) {
MULTI(i, TILE_SIZE) {
col[i] = rho_diag[y_offset + i];
}
SYNC;
MULTI(m, TILE_SIZE) {
for(auto n = 0u; n < TILE_SIZE; n++) {
const auto hamming_distance = bit_count(
(x_offset + m) ^ (y_offset + n)
);
const auto hamming_sign = (hamming_distance & 1u) ? -1.0 : 1.0;
const auto hamming_weight = 1.0 / double(1u << hamming_distance);
thread_result += hamming_sign * hamming_weight * row[m] * col[n];
}
}
SYNC;
}
}
#ifdef __CUDA_ARCH__
SHARED double block_result;
tree_sum(block_result, TILE_SIZE, thread_result);
SINGLE {
generic_atomicAdd(result, block_result);
}
#else
*result = thread_result;
#endif
}
} // namespace kernel
double renyi_double_sum(const Array<double>& rho_diag) {
const bool gpu = rho_diag.gpu;
const auto N = rho_diag.size();
Array<double> result(1, gpu);
result.clear();
auto result_ptr = result.data();
auto rho_diag_ptr = rho_diag.data();
if(gpu) {
if(N >= 256) {
constexpr auto TILE_SIZE = 256u;
cuda_kernel<<<N / TILE_SIZE, TILE_SIZE>>>(
[=] __device__ () {
kernel::renyi_double_sum<TILE_SIZE>(result_ptr, rho_diag_ptr, N);
}
);
}
else if(N >= 32) {
constexpr auto TILE_SIZE = 32u;
cuda_kernel<<<N / TILE_SIZE, TILE_SIZE>>>(
[=] __device__ () {
kernel::renyi_double_sum<TILE_SIZE>(result_ptr, rho_diag_ptr, N);
}
);
}
else {
constexpr auto TILE_SIZE = 4u;
cuda_kernel<<<N / TILE_SIZE, TILE_SIZE>>>(
[=] __device__ () {
kernel::renyi_double_sum<TILE_SIZE>(result_ptr, rho_diag_ptr, N);
}
);
}
}
else {
if(N >= 32u) {
kernel::renyi_double_sum<32u>(result.data(), rho_diag.data(), N);
}
else {
kernel::renyi_double_sum<4u>(result.data(), rho_diag.data(), N);
}
}
result.update_host();
return result.front();
}
} // namespace rbm_on_gpu
|
5c9a88e996dadbc3481fe4e38f5ada322b42b625.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
__global__ void Polynomial1DKernel(tfloat* d_x, tfloat* d_output, int npoints, tfloat* d_factors, int degree);
////////////////////////////////////////
//Calculate polynomial at given points//
////////////////////////////////////////
void d_Polynomial1D(tfloat* d_x, tfloat* d_output, int npoints, tfloat* d_factors, int degree, int batch)
{
dim3 TpB = dim3(min(256, NextMultipleOf(npoints, 32)));
dim3 grid = dim3((npoints + TpB.x - 1) / TpB.x, batch);
Polynomial1DKernel << <grid, TpB >> > (d_x, d_output, npoints, d_factors, degree);
}
__global__ void Polynomial1DKernel(tfloat* d_x, tfloat* d_output, int npoints, tfloat* d_factors, int degree)
{
d_x += npoints * blockIdx.y;
d_output += npoints * blockIdx.y;
d_factors += degree * blockIdx.y;
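		// Stage the polynomial coefficients of this batch item in shared memory
		// (degree is assumed to be at most 1024).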
__shared__ tfloat s_factors[1024];
for (int i = threadIdx.x; i < degree; i += blockDim.x)
s_factors[i] = d_factors[i];
__syncthreads();
		for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < npoints; idx += blockDim.x * gridDim.x)
{
tfloat val = s_factors[0];
tfloat x = d_x[idx];
tfloat lastx = x;
for (int f = 1; f < degree; f++)
{
val += lastx * s_factors[f];
lastx *= x;
}
d_output[idx] = val;
}
}
}
|
5c9a88e996dadbc3481fe4e38f5ada322b42b625.cu
|
#include "Prerequisites.cuh"
namespace gtom
{
////////////////////////////
//CUDA kernel declarations//
////////////////////////////
__global__ void Polynomial1DKernel(tfloat* d_x, tfloat* d_output, int npoints, tfloat* d_factors, int degree);
////////////////////////////////////////
//Calculate polynomial at given points//
////////////////////////////////////////
void d_Polynomial1D(tfloat* d_x, tfloat* d_output, int npoints, tfloat* d_factors, int degree, int batch)
{
dim3 TpB = dim3(min(256, NextMultipleOf(npoints, 32)));
dim3 grid = dim3((npoints + TpB.x - 1) / TpB.x, batch);
Polynomial1DKernel << <grid, TpB >> > (d_x, d_output, npoints, d_factors, degree);
}
__global__ void Polynomial1DKernel(tfloat* d_x, tfloat* d_output, int npoints, tfloat* d_factors, int degree)
{
d_x += npoints * blockIdx.y;
d_output += npoints * blockIdx.y;
d_factors += degree * blockIdx.y;
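		// Stage the polynomial coefficients of this batch item in shared memory
		// (degree is assumed to be at most 1024).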
__shared__ tfloat s_factors[1024];
for (int i = threadIdx.x; i < degree; i += blockDim.x)
s_factors[i] = d_factors[i];
__syncthreads();
		for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < npoints; idx += blockDim.x * gridDim.x)
{
tfloat val = s_factors[0];
tfloat x = d_x[idx];
tfloat lastx = x;
for (int f = 1; f < degree; f++)
{
val += lastx * s_factors[f];
lastx *= x;
}
d_output[idx] = val;
}
}
}
|
da8c82cb92df2bc6b41566d031b1f9fc047fdf4f.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* (C) Copyright 2014 The Board of Trustees of the
* Florida Institute of Technology
* All Rights Reserved
*
* Lab 2 Matrix Multiplication
******************************************************************************/
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <iostream>
//CUDA runtime
#include <hip/hip_runtime.h>
//Helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#include <device_launch_parameters.h>
using namespace std;
extern __shared__ float Mds[];
extern __shared__ float Nds[];
void verify(float *A_h, float *B_h, float *C_h, int width);
__global__ void matrixMulKernel(int width, float *A_d, float *B_d, float* C_d) {
/********************************************************************
*
* Compute C = A x B
* where A is a (width x width) matrix
* where B is a (width x width) matrix
* where C is a (width x width) matrix
*
********************************************************************/
// INSERT KERNEL CODE HERE
#ifdef USE_TILE
	const int TILE_WIDTH = 16;                    // must match the 16x16 launch block
	__shared__ float As[TILE_WIDTH][TILE_WIDTH];
	__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
	int bx = blockIdx.x; int by = blockIdx.y;
	int tx = threadIdx.x; int ty = threadIdx.y;
	int row = by * TILE_WIDTH + ty;
	int col = bx * TILE_WIDTH + tx;
	float Pvalue = 0;
	for (int m = 0; m < (width + TILE_WIDTH - 1) / TILE_WIDTH; ++m) {
		// Stage one tile of A and one tile of B in shared memory, zero-padding past the edges
		As[ty][tx] = (row < width && m * TILE_WIDTH + tx < width) ? A_d[row * width + m * TILE_WIDTH + tx] : 0.0f;
		Bs[ty][tx] = (col < width && m * TILE_WIDTH + ty < width) ? B_d[(m * TILE_WIDTH + ty) * width + col] : 0.0f;
		__syncthreads();
		for (int k = 0; k < TILE_WIDTH; ++k) {
			Pvalue += As[ty][k] * Bs[k][tx];
		}
		__syncthreads();
	}
	if ((row < width) && (col < width)) {
		C_d[row * width + col] = Pvalue;
	}
#else
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if ((row < width) && (col < width)){
float Pvalue = 0;
for (int k = 0; k < width; k++){
Pvalue += A_d[row*width + k] * B_d[k*width+col];
}
C_d[row*width + col] = Pvalue;
}
#endif
}
extern "C" void
matrixMultiplicationFunction(int width)
{
clock_t beginTotalTime;
clock_t endTotalTime;
clock_t begin;
clock_t end;
beginTotalTime = clock();
printf("\nSetting up the problem...\n"); fflush(stdout);
begin = clock();
/* Matrix size in bytes */
/* Calculate the data size for the matrix allocation */
int nElements = width*width;
/* Initialize the matrices with random data */
// Random data for host array A
float* A_h = (float*)malloc(nElements*sizeof(float));
for (unsigned int i = 0; i < nElements; i++) {
A_h[i] = (rand() % 100) / 100.00;
}
//Random data for host array B
float* B_h = (float*)malloc(nElements*sizeof(float));
for (int i = 0; i < nElements; i++) {
B_h[i] = (rand() % 100) / 100.00;
}
float* C_h = (float*)malloc(nElements*sizeof(float));
end = clock();
printf("Elapsed: %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", width, width,
width, width, width, width);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...\n"); fflush(stdout);
begin = clock();
//INSERT CODE HERE
float *d_A, *d_B, *d_C;
hipMalloc((void **)&d_A, nElements*sizeof(float));
hipMalloc((void **)&d_B, nElements*sizeof(float));
hipMalloc((void **)&d_C, nElements*sizeof(float));
checkCudaErrors(hipDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device...\n"); fflush(stdout);
begin = clock();
//INSERT CODE HERE
hipMemcpy(d_A, A_h, nElements*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, B_h, nElements*sizeof(float), hipMemcpyHostToDevice);
checkCudaErrors(hipDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel...\n"); fflush(stdout);
begin = clock();
// Initialize thread block and kernel grid dimensions ---------------------
int BLOCK_SIZE = 16; // Use 16x16 thread blocks
//INSERT CODE HERE
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 dimGrid(ceil(width / (float)BLOCK_SIZE), ceil(width / (float)BLOCK_SIZE), 1);
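	// One thread per output element; ceil() makes the grid cover the whole matrix even when
	// width is not a multiple of BLOCK_SIZE.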
// Invoke CUDA kernel -----------------------------------------------------
//INSERT CODE HERE
hipLaunchKernelGGL(( matrixMulKernel) , dim3(dimGrid), dim3(dimBlock), 256 , 0, width, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host...\n"); fflush(stdout);
begin = clock();
//INSERT CODE HERE
hipMemcpy(C_h, d_C, nElements*sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(hipDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
endTotalTime = clock();
// Verify correctness -----------------------------------------------------
printf("Verifying results...\n"); fflush(stdout);
verify(A_h, B_h, C_h, width);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(d_A); hipFree(d_B); hipFree(d_C);
printf("Total Elapsed Processing: %f seconds\n", (double)(endTotalTime - beginTotalTime) / CLOCKS_PER_SEC);
}
void verify(float *A_h, float *B_h, float *C_h, int width) {
const float relativeTolerance = 1e-6;
for (int row = 0; row < width; ++row) {
for (int col = 0; col < width; ++col) {
float sum = 0;
for (int i = 0; i < width; ++i) {
sum += A_h[row*width + i] * B_h[i*width + col];
}
float relativeError = (sum - C_h[row*width + col]) / sum;
if (relativeError > relativeTolerance
|| relativeError < -relativeTolerance) {
printf("TEST FAILED\n\n");
exit(0);
}
}
}
printf("TEST PASSED\n\n");
}
extern "C" void
matrixMultiplication_C(int width) {
clock_t beginTotalTime;
clock_t endTotalTime;
clock_t begin;
clock_t end;
beginTotalTime = clock();
printf("\nSetting up the problem...\n"); fflush(stdout);
begin = clock();
/* Matrix size in bytes */
/* Calculate the data size for the matrix allocation */
int nElements = width*width;
int dataMemorySize = nElements * sizeof(float);
/* Initialize the matrices with random data */
// Random data for host array A
float* A_h = (float*)malloc(dataMemorySize);
for (unsigned int i = 0; i < nElements; i++) {
A_h[i] = (rand() % 100) / 100.00;
}
//Random data for host array B
float* B_h = (float*)malloc(dataMemorySize);
for (int i = 0; i < nElements; i++) {
B_h[i] = (rand() % 100) / 100.00;
}
float* C_h = (float*)malloc(dataMemorySize);
end = clock();
printf("Elapsed: %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", width, width,
width, width, width, width);
printf("Launch C processing...\n"); fflush(stdout);
begin = clock();
for (int row = 0; row < width; ++row) {
for (int col = 0; col < width; ++col) {
float sum = 0;
for (int i = 0; i < width; ++i) {
sum += A_h[row*width + i] * B_h[i*width + col];
}
C_h[row*width + col] = sum;
}
}
end = clock();
printf("Elapsed: %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
endTotalTime = clock();
printf("Verifying results...\n"); fflush(stdout);
verify(A_h, B_h, C_h, width);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
printf("Total Elapsed Processing: %f seconds\n", (double)(endTotalTime - beginTotalTime) / CLOCKS_PER_SEC);
}
|
da8c82cb92df2bc6b41566d031b1f9fc047fdf4f.cu
|
/******************************************************************************
*
* (C) Copyright 2014 The Board of Trustees of the
* Florida Institute of Technology
* All Rights Reserved
*
* Lab 2 Matrix Multiplication
******************************************************************************/
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <iostream>
//CUDA runtime
#include <cuda_runtime.h>
//Helper functions and utilities to work with CUDA
#include <helper_cuda.h>
#include <helper_functions.h>
#include <device_launch_parameters.h>
using namespace std;
extern __shared__ float Mds[];
extern __shared__ float Nds[];
void verify(float *A_h, float *B_h, float *C_h, int width);
__global__ void matrixMulKernel(int width, float *A_d, float *B_d, float* C_d) {
/********************************************************************
*
* Compute C = A x B
* where A is a (width x width) matrix
* where B is a (width x width) matrix
* where C is a (width x width) matrix
*
********************************************************************/
// INSERT KERNEL CODE HERE
#ifdef USE_TILE
	const int TILE_WIDTH = 16;                    // must match the 16x16 launch block
	__shared__ float As[TILE_WIDTH][TILE_WIDTH];
	__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
	int bx = blockIdx.x; int by = blockIdx.y;
	int tx = threadIdx.x; int ty = threadIdx.y;
	int row = by * TILE_WIDTH + ty;
	int col = bx * TILE_WIDTH + tx;
	float Pvalue = 0;
	for (int m = 0; m < (width + TILE_WIDTH - 1) / TILE_WIDTH; ++m) {
		// Stage one tile of A and one tile of B in shared memory, zero-padding past the edges
		As[ty][tx] = (row < width && m * TILE_WIDTH + tx < width) ? A_d[row * width + m * TILE_WIDTH + tx] : 0.0f;
		Bs[ty][tx] = (col < width && m * TILE_WIDTH + ty < width) ? B_d[(m * TILE_WIDTH + ty) * width + col] : 0.0f;
		__syncthreads();
		for (int k = 0; k < TILE_WIDTH; ++k) {
			Pvalue += As[ty][k] * Bs[k][tx];
		}
		__syncthreads();
	}
	if ((row < width) && (col < width)) {
		C_d[row * width + col] = Pvalue;
	}
#else
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if ((row < width) && (col < width)){
float Pvalue = 0;
for (int k = 0; k < width; k++){
Pvalue += A_d[row*width + k] * B_d[k*width+col];
}
C_d[row*width + col] = Pvalue;
}
#endif
}
extern "C" void
matrixMultiplicationFunction(int width)
{
clock_t beginTotalTime;
clock_t endTotalTime;
clock_t begin;
clock_t end;
beginTotalTime = clock();
printf("\nSetting up the problem...\n"); fflush(stdout);
begin = clock();
/* Matrix size in bytes */
/* Calculate the data size for the matrix allocation */
int nElements = width*width;
/* Initialize the matrices with random data */
// Random data for host array A
float* A_h = (float*)malloc(nElements*sizeof(float));
for (unsigned int i = 0; i < nElements; i++) {
A_h[i] = (rand() % 100) / 100.00;
}
//Random data for host array B
float* B_h = (float*)malloc(nElements*sizeof(float));
for (int i = 0; i < nElements; i++) {
B_h[i] = (rand() % 100) / 100.00;
}
float* C_h = (float*)malloc(nElements*sizeof(float));
end = clock();
printf("Elapsed: %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", width, width,
width, width, width, width);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...\n"); fflush(stdout);
begin = clock();
//INSERT CODE HERE
float *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, nElements*sizeof(float));
cudaMalloc((void **)&d_B, nElements*sizeof(float));
cudaMalloc((void **)&d_C, nElements*sizeof(float));
checkCudaErrors(cudaDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device...\n"); fflush(stdout);
begin = clock();
//INSERT CODE HERE
cudaMemcpy(d_A, A_h, nElements*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B_h, nElements*sizeof(float), cudaMemcpyHostToDevice);
checkCudaErrors(cudaDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel...\n"); fflush(stdout);
begin = clock();
// Initialize thread block and kernel grid dimensions ---------------------
int BLOCK_SIZE = 16; // Use 16x16 thread blocks
//INSERT CODE HERE
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 dimGrid(ceil(width / (float)BLOCK_SIZE), ceil(width / (float)BLOCK_SIZE), 1);
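	// One thread per output element; ceil() makes the grid cover the whole matrix even when
	// width is not a multiple of BLOCK_SIZE.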
// Invoke CUDA kernel -----------------------------------------------------
//INSERT CODE HERE
matrixMulKernel <<<dimGrid, dimBlock>>>(width, d_A, d_B, d_C); // no dynamic shared memory is needed; the tiled path uses statically sized __shared__ arrays
checkCudaErrors(cudaDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
// Copy device variables back to host --------------------------------------
printf("Copying data from device to host...\n"); fflush(stdout);
begin = clock();
//INSERT CODE HERE
cudaMemcpy(C_h, d_C, nElements*sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(cudaDeviceSynchronize());
end = clock();
printf("Elapsed: %f seconds\n\n", (double)(end - begin) / CLOCKS_PER_SEC);
endTotalTime = clock();
// Verify correctness -----------------------------------------------------
printf("Verifying results...\n"); fflush(stdout);
verify(A_h, B_h, C_h, width);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
printf("Total Elapsed Processing: %f seconds\n", (double)(endTotalTime - beginTotalTime) / CLOCKS_PER_SEC);
}
void verify(float *A_h, float *B_h, float *C_h, int width) {
const float relativeTolerance = 1e-6;
for (int row = 0; row < width; ++row) {
for (int col = 0; col < width; ++col) {
float sum = 0;
for (int i = 0; i < width; ++i) {
sum += A_h[row*width + i] * B_h[i*width + col];
}
float relativeError = (sum - C_h[row*width + col]) / sum;
if (relativeError > relativeTolerance
|| relativeError < -relativeTolerance) {
printf("TEST FAILED\n\n");
exit(0);
}
}
}
printf("TEST PASSED\n\n");
}
extern "C" void
matrixMultiplication_C(int width) {
clock_t beginTotalTime;
clock_t endTotalTime;
clock_t begin;
clock_t end;
beginTotalTime = clock();
printf("\nSetting up the problem...\n"); fflush(stdout);
begin = clock();
/* Matrix size in bytes */
/* Calculate the data size for the matrix allocation */
int nElements = width*width;
int dataMemorySize = nElements * sizeof(float);
/* Initialize the matrices with random data */
// Random data for host array A
float* A_h = (float*)malloc(dataMemorySize);
for (unsigned int i = 0; i < nElements; i++) {
A_h[i] = (rand() % 100) / 100.00;
}
//Random data for host array B
float* B_h = (float*)malloc(dataMemorySize);
for (int i = 0; i < nElements; i++) {
B_h[i] = (rand() % 100) / 100.00;
}
float* C_h = (float*)malloc(dataMemorySize);
end = clock();
printf("Elapsed: %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", width, width,
width, width, width, width);
printf("Launch C processing...\n"); fflush(stdout);
begin = clock();
for (int row = 0; row < width; ++row) {
for (int col = 0; col < width; ++col) {
float sum = 0;
for (int i = 0; i < width; ++i) {
sum += A_h[row*width + i] * B_h[i*width + col];
}
C_h[row*width + col] = sum;
}
}
end = clock();
printf("Elapsed: %f seconds\n", (double)(end - begin) / CLOCKS_PER_SEC);
endTotalTime = clock();
printf("Verifying results...\n"); fflush(stdout);
verify(A_h, B_h, C_h, width);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
printf("Total Elapsed Processing: %f seconds\n", (double)(endTotalTime - beginTotalTime) / CLOCKS_PER_SEC);
}
|
952adc0f3e302caa99c6a90c6d5475f642eeaaa6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl2.cu, normal z -> s, Mon Jun 25 18:24:12 2018
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl2_full(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        float mul = D[ind];   // read D only for rows that are in range
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl2_lower(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        int break_d = (ind < n) ? ind : n-1;
        float mul = D[ind];   // read D only for rows that are in range
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl2_upper(int m, int n, const float *D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        float mul = D[ind];   // read D only for rows that are in range
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_slascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
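/*
    Minimal host-side usage sketch, assuming a magma_queue_t has already been
    created and that dD (length m) and dA (ldda x n) are valid device buffers:

        magma_int_t info;
        magmablas_slascl2( MagmaFull, m, n, dD, dA, ldda, queue, &info );
        if (info != 0) {
            // a negative value identifies which argument was illegal
        }
*/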
extern "C" void
magmablas_slascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( slascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( slascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( slascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
}
|
952adc0f3e302caa99c6a90c6d5475f642eeaaa6.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlascl2.cu, normal z -> s, Mon Jun 25 18:24:12 2018
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
slascl2_full(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        float mul = D[ind];   // read D only for rows that are in range
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl2_lower(int m, int n, const float* D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        int break_d = (ind < n) ? ind : n-1;
        float mul = D[ind];   // read D only for rows that are in range
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl2_upper(int m, int n, const float *D, float* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;
    A += ind;
    if (ind < m) {
        float mul = D[ind];   // read D only for rows that are in range
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
SLASCL2 scales the M by N real matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD REAL vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_slascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_slascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dD,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
slascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
slascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
slascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
}
|
162ad887f61f95e7a1a55ceff6dd5712d43ebf1a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "chTimer.h"
#include <iostream>
#include <fstream>
__global__
void
CopyKernel(long bytes, float *x, float *y) // Kernel that simply multiplies a given array by two (was used to check whether correct data was written in y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < bytes) y[i] = 2*x[i];
}
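// Usage sketch for a launch that covers every element (the launch in main below uses a
// fixed dim3(4),dim3(256) grid, which only touches the first 1024 elements), assuming n
// is the element count that is passed as 'bytes':
//   int threads = 256;
//   int blocks  = (n + threads - 1) / threads;
//   hipLaunchKernelGGL(CopyKernel, dim3(blocks), dim3(threads), 0, 0, n, d_x, d_y);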
int main(){
for(int j = 10; j< 31; j++){ //loop through data sizes
long bytes = (1<<j) / sizeof(float); // number of float elements in a (1<<j)-byte buffer (despite the name, this is an element count, not bytes)
//printf("Data size = %ld Bytes: \n",bytes*sizeof(float));
printf("%ld\t",bytes*sizeof(float));
for(int k = 0; k<2; k++){ //loop for pageable and pinned memory
float *x, *d_x, *y, *d_y;
if (k==0){
x = (float*)malloc(bytes*sizeof(float));
y = (float*)malloc(bytes*sizeof(float));
}
else {
hipHostMalloc((void **) &x, bytes*sizeof(float));
hipHostMalloc((void **) &y, bytes*sizeof(float));
};
hipMalloc(&d_x, bytes*sizeof(float));
hipMalloc(&d_y, bytes*sizeof(float));
for (int i = 0; i < bytes; i++) {
x[i] = i/2.;
};
//Timing with clock and chTimer just to check them
chTimerTimestamp start, stop;
clock_t begin = clock();
chTimerGetTime( &start );
hipMemcpy(d_x, x, bytes*sizeof(float), hipMemcpyHostToDevice);
chTimerGetTime( &stop );
clock_t end = clock();
long diff = end - begin;
double microseconds = 1e6*chTimerElapsedTime( &start, &stop );
//printf("Copying HostToDevice took %i cycles / %.2d us \n", diff, microseconds);
printf("%ld\t%f\t",diff,microseconds); //the output is directly in a form in which it can be easily accessed an plotted (but bit harder to read)
hipLaunchKernelGGL(( CopyKernel), dim3(4),dim3(256), 0, 0, bytes, d_x, d_y);
hipDeviceSynchronize();
chTimerTimestamp start2, stop2;
begin = clock();
chTimerGetTime( &start2 );
hipMemcpy(y, d_y, bytes*sizeof(float), hipMemcpyDeviceToHost);
//hipDeviceSynchronize();
chTimerGetTime( &stop2 );
end = clock();
diff = end - begin;
double microseconds2 = 1e6*chTimerElapsedTime( &start2, &stop2 ); //For some reason this is always 1 which is wrong
hipFree(d_x);
hipFree(d_y);
if (k==0){
printf("%ld\t%f\t",diff,microseconds2);
free(x);
free(y);
}
else {
printf("%ld\t%f\n",diff,microseconds2);
hipHostFree(x);
hipHostFree(y);
};
};
};
}
|
162ad887f61f95e7a1a55ceff6dd5712d43ebf1a.cu
|
#include <stdio.h>
#include "chTimer.h"
#include <iostream>
#include <fstream>
__global__
void
CopyKernel(long bytes, float *x, float *y) // Kernel that simply multiplies a given array by two (was used to check whether correct data was written in y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < bytes) y[i] = 2*x[i];
}
int main(){
for(int j = 10; j< 31; j++){ //loop through data sizes
long bytes = (1<<j) / sizeof(float); // number of float elements in a (1<<j)-byte buffer (despite the name, this is an element count, not bytes)
//printf("Data size = %ld Bytes: \n",bytes*sizeof(float));
printf("%ld\t",bytes*sizeof(float));
for(int k = 0; k<2; k++){ //loop for pageable and pinned memory
float *x, *d_x, *y, *d_y;
if (k==0){
x = (float*)malloc(bytes*sizeof(float));
y = (float*)malloc(bytes*sizeof(float));
}
else {
cudaMallocHost((void **) &x, bytes*sizeof(float));
cudaMallocHost((void **) &y, bytes*sizeof(float));
};
cudaMalloc(&d_x, bytes*sizeof(float));
cudaMalloc(&d_y, bytes*sizeof(float));
for (int i = 0; i < bytes; i++) {
x[i] = i/2.;
};
//Timing with clock and chTimer just to check them
chTimerTimestamp start, stop;
clock_t begin = clock();
chTimerGetTime( &start );
cudaMemcpy(d_x, x, bytes*sizeof(float), cudaMemcpyHostToDevice);
chTimerGetTime( &stop );
clock_t end = clock();
long diff = end - begin;
double microseconds = 1e6*chTimerElapsedTime( &start, &stop );
//printf("Copying HostToDevice took %i cycles / %.2d us \n", diff, microseconds);
printf("%ld\t%f\t",diff,microseconds); //the output is directly in a form in which it can be easily accessed an plotted (but bit harder to read)
CopyKernel<<<4,256>>>(bytes, d_x, d_y);
cudaDeviceSynchronize();
chTimerTimestamp start2, stop2;
begin = clock();
chTimerGetTime( &start2 );
cudaMemcpy(y, d_y, bytes*sizeof(float), cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
chTimerGetTime( &stop2 );
end = clock();
diff = end - begin;
double microseconds2 = 1e6*chTimerElapsedTime( &start2, &stop2 ); //For some reason this is always 1 which is wrong
cudaFree(d_x);
cudaFree(d_y);
if (k==0){
printf("%ld\t%f\t",diff,microseconds2);
free(x);
free(y);
}
else {
printf("%ld\t%f\n",diff,microseconds2);
cudaFreeHost(x);
cudaFreeHost(y);
};
};
};
}
|
4d6174776b276af1222fdaac4a172b331ed4098f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "soft_rectified_linear_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
__global__ void soft_rectified_linear_kernel(
float4 * __restrict input,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = __logf(__expf(val.x) + 1.0F);
val.y = __logf(__expf(val.y) + 1.0F);
val.z = __logf(__expf(val.z) + 1.0F);
val.w = __logf(__expf(val.w) + 1.0F);
input[elem_id] = val;
}
}
namespace nnforge
{
namespace cuda
{
soft_rectified_linear_layer_tester_cuda::soft_rectified_linear_layer_tester_cuda()
{
}
soft_rectified_linear_layer_tester_cuda::~soft_rectified_linear_layer_tester_cuda()
{
}
void soft_rectified_linear_layer_tester_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
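			// rounds up to whole float4 elements; this assumes the underlying buffer is allocated/padded to a multiple of 4 floats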
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( soft_rectified_linear_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_buffer,
elem_count);
}
}
}
|
4d6174776b276af1222fdaac4a172b331ed4098f.cu
|
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "soft_rectified_linear_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
__global__ void soft_rectified_linear_kernel(
float4 * __restrict input,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = __logf(__expf(val.x) + 1.0F);
val.y = __logf(__expf(val.y) + 1.0F);
val.z = __logf(__expf(val.z) + 1.0F);
val.w = __logf(__expf(val.w) + 1.0F);
input[elem_id] = val;
}
}
namespace nnforge
{
namespace cuda
{
soft_rectified_linear_layer_tester_cuda::soft_rectified_linear_layer_tester_cuda()
{
}
soft_rectified_linear_layer_tester_cuda::~soft_rectified_linear_layer_tester_cuda()
{
}
void soft_rectified_linear_layer_tester_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
soft_rectified_linear_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_buffer,
elem_count);
}
}
}
|
ebf183fd639d60645f1e0b93d44423fbcdcad4ee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void accumulateRowsKernel( float *input, float *output, int channels, int h, int w) {
// view multichannel image as a multiline single-channel image
int globalRowIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
if (globalRowIdx < channels * h) {
float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1;
outputRow[-1] = 0;
double sum = 0;
for (int i = 0; i < w; ++i) {
sum += input[globalRowIdx * w + i];
outputRow[i] = static_cast<float>(sum);
}
// need to zero the (0,0) corner of the output separately >:(
output[(globalRowIdx / h) * (w+1) * (h+1)] = 0;
}
}
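// Launch sketch: one thread per row of the (channels*h)-row view, assuming BLOCK_SIZE is
// the macro supplied by includes.h:
//   int threadsPerBlock = BLOCK_SIZE * BLOCK_SIZE;
//   int blocks = (channels * h + threadsPerBlock - 1) / threadsPerBlock;
//   hipLaunchKernelGGL(accumulateRowsKernel, dim3(blocks), dim3(threadsPerBlock), 0, 0,
//                      input, output, channels, h, w);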
|
ebf183fd639d60645f1e0b93d44423fbcdcad4ee.cu
|
#include "includes.h"
__global__ void accumulateRowsKernel( float *input, float *output, int channels, int h, int w) {
// view multichannel image as a multiline single-channel image
int globalRowIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
if (globalRowIdx < channels * h) {
float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1;
outputRow[-1] = 0;
double sum = 0;
for (int i = 0; i < w; ++i) {
sum += input[globalRowIdx * w + i];
outputRow[i] = static_cast<float>(sum);
}
// need to zero the (0,0) corner of the output separately >:(
output[(globalRowIdx / h) * (w+1) * (h+1)] = 0;
}
}
|
35fc19428b7bc47b7a5c98bbd575c98877945e0e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../../gpu_lib/header.h"
#include "../../utils/header.h"
#include <cstdio>
#include <cstdlib>
namespace ftxj {
__device__ inline float __ReLU(float x){
return x<0.0?0.0:x>32.0?32.0:x;
};
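    // clipped ReLU: negative inputs go to 0 and outputs are capped at 32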
#define MINIBATCH 8
#define UNROLL 8
__global__ void n16384l1_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ index,
int* categories,
int* active,
int batch,
int neuron,
float bias) {
extern __shared__ float shared[];
int start_idx = index[blockIdx.y];
int col_gropu = threadIdx.x / 16;
int last_load = ((neuron / 16) % 7) * 16 + 16;
int load_num = (blockIdx.y + 1) == gridDim.y ? last_load : 128;
for(int n = threadIdx.x; n < load_num; n += blockDim.x){
for(int f = 0; f < MINIBATCH; ++f) {
shared[f * 128 + n] = A[(blockIdx.x * MINIBATCH + f) * neuron + (start_idx + n) % neuron];
}
}
__syncthreads();
int last_thread = (neuron % 112);
if(col_gropu == 7 || ((blockIdx.y + 1) == gridDim.y && threadIdx.x >= last_thread)) return;
float res[MINIBATCH] = {0.0};
for(int r = 0; r < 32; ++r) {
float val = B[(blockIdx.y * 128 * 32) + r * 128 + threadIdx.x];
int idx = col_gropu * 16 + r;
for(int f = 0; f < MINIBATCH / UNROLL; ++f) {
res[0 + f * UNROLL] += shared[(f * UNROLL + 0) * 128 + idx] * val;
res[1 + f * UNROLL] += shared[(f * UNROLL + 1) * 128 + idx] * val;
res[2 + f * UNROLL] += shared[(f * UNROLL + 2) * 128 + idx] * val;
res[3 + f * UNROLL] += shared[(f * UNROLL + 3) * 128 + idx] * val;
res[4 + f * UNROLL] += shared[(f * UNROLL + 4) * 128 + idx] * val;
res[5 + f * UNROLL] += shared[(f * UNROLL + 5) * 128 + idx] * val;
res[6 + f * UNROLL] += shared[(f * UNROLL + 6) * 128 + idx] * val;
res[7 + f * UNROLL] += shared[(f * UNROLL + 7) * 128 + idx] * val;
}
}
__syncthreads();
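    // Note: the assignment inside the if below is intentional - it stores the activated
    // value in C and, when that value is nonzero, flags the batch row as active.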
for(int f = 0; f < MINIBATCH; ++f) {
// && blockIdx.x * MINIBATCH + f < batch; ++f) {
if(C[(blockIdx.x * MINIBATCH + f) * neuron + blockIdx.y * 112 + threadIdx.x] = __ReLU(res[f] + bias)) {
active[blockIdx.x * MINIBATCH + f] = 1;
}
}
};
__global__ void n16384_l2_l11_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ categories,
int* __restrict__ active,
int stride,
int batch,
int neuron,
float bias) {
extern __shared__ float shared[];
int start_idx1 = (blockDim.x / 16) * (blockIdx.y) * 16;
int start_idx2 = (blockDim.x / 16) * (blockIdx.y) * 16 + stride;
int load_num = stride > blockDim.x ? 32 * (blockDim.x / 16) : stride + 16 * (blockDim.x / 16);
int shared_size = ((load_num + 31) / 32) * 32;
int col_gropu = threadIdx.x / 16;
for(int n = threadIdx.x; n < load_num * MINIBATCH; n += blockDim.x){
int f = n / load_num;
int k = n % load_num;
int a_k = ((stride > blockDim.x) && (k >= blockDim.x)) ? (k - blockDim.x) + start_idx2 : k + start_idx1;
shared[f * shared_size + k] = A[categories[(blockIdx.x * MINIBATCH + f)] * neuron + (a_k) % neuron];
}
__syncthreads();
int gap = stride >= blockDim.x ? blockDim.x : stride;
float res[MINIBATCH] = {0.0};
for(int r = 0; r < 32; ++r) {
float val = B[(blockIdx.y * blockDim.x * 32) + r * blockDim.x + threadIdx.x];
int idx = col_gropu * 16 + (r >= 16? r + gap - 16 : r);
for(int f = 0; f < MINIBATCH / UNROLL; ++f) {
res[0 + f * UNROLL] += shared[(f * UNROLL + 0) * shared_size + idx] * val;
res[1 + f * UNROLL] += shared[(f * UNROLL + 1) * shared_size + idx] * val;
res[2 + f * UNROLL] += shared[(f * UNROLL + 2) * shared_size + idx] * val;
res[3 + f * UNROLL] += shared[(f * UNROLL + 3) * shared_size + idx] * val;
res[4 + f * UNROLL] += shared[(f * UNROLL + 4) * shared_size + idx] * val;
res[5 + f * UNROLL] += shared[(f * UNROLL + 5) * shared_size + idx] * val;
res[6 + f * UNROLL] += shared[(f * UNROLL + 6) * shared_size + idx] * val;
res[7 + f * UNROLL] += shared[(f * UNROLL + 7) * shared_size + idx] * val;
}
}
for(int f = 0; f < MINIBATCH ; ++f) {
if(C[(blockIdx.x * MINIBATCH + f) * neuron + blockIdx.y * blockDim.x + threadIdx.x] = __ReLU(res[f] + bias)) {
active[blockIdx.x * MINIBATCH + f] = 1;
}
}
};
#define OUT_CHANNEL 16
__global__ void n16384_l11_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ index,
int* __restrict__ active,
int batch,
int neuron,
float bias
) {
extern __shared__ float shared[];
for(int n = threadIdx.x; n < OUT_CHANNEL * 32; n += blockDim.x){
shared[n] = B[(blockIdx.y * OUT_CHANNEL * 32) + n];
}
__syncthreads();
if((blockIdx.x * blockDim.x + threadIdx.x) >= batch) return;
int begin_idx = blockIdx.y * OUT_CHANNEL / 16 * 32;
for(int o_r = 0; o_r < OUT_CHANNEL / 16; ++o_r) {
float reduce[16] = {0.0};
int idx = begin_idx + o_r * 32;
for(int r = 0; r < 32; ++r) {
int row_idx = index[idx + r];
float val = A[row_idx * batch + blockIdx.x * blockDim.x + threadIdx.x];
for(int c = 0; c < 16; ++c) {
reduce[c] += val * shared[o_r * 32 * 16 + r * 16 + c];
}
}
for(int c = 0; c < 16; ++c) {
if(C[(blockIdx.y * OUT_CHANNEL + o_r * 16 + c) * batch + blockIdx.x * blockDim.x + threadIdx.x]
= __ReLU(reduce[c] + bias)) {
active[blockIdx.x * blockDim.x + threadIdx.x] = 1;
}
}
}
};
#define TILE_DIM 32
#define BLOCK_ROWS 8
__global__ void matrix_transpose(float * __restrict__ odata, float * __restrict__ idata, int neuron, int batch) {
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
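    // the +1 column of padding keeps the transposed reads free of shared-memory bank conflicts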
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM && (y + j) < batch && x < neuron; j += BLOCK_ROWS) {
tile[(threadIdx.y + j)][threadIdx.x] = idata[(y + j) * neuron + x];
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM && x < batch && y + j < neuron; j += BLOCK_ROWS) {
odata[(y+j) * batch + x] = tile[threadIdx.x][threadIdx.y + j];
}
};
__global__ void matrix_re_transpose_and_delete(
float * __restrict__ odata,
float * __restrict__ idata,
int * __restrict__ old_to_new_map,
int neuron, int batch) {
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM && x < batch; j += BLOCK_ROWS) {
tile[(threadIdx.y + j)][threadIdx.x] = idata[(y + j) * batch + x];
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // old row
y = blockIdx.x * TILE_DIM + threadIdx.y; // old batch
for (int j = 0; j < TILE_DIM && (y+j) < batch; j += BLOCK_ROWS) {
if(old_to_new_map[y + j] == -1) continue;
int tmp = old_to_new_map[y + j]; // new batch
odata[tmp * neuron + x] = tile[threadIdx.x][threadIdx.y + j];
}
};
void test_benchmark_multi_gpu_graph_challenge(
std::vector<std::vector<float>> &input,
std::vector<std::vector<float>> &weight,
std::vector<std::vector<int>> &row_access,
int batch,
int neuron,
float bias,
int gpu_index,
int batch_index
) {
GpuEnv env(gpu_index);
std::string gpu_event = "gpu_" + std::to_string(gpu_index) + "_kernel";
env.add_event(gpu_event);
std::cout << "[GPU" << gpu_index << "]......" << std::endl;
float* A; // batch parallel, cpu data
float* A_T;
float* A_d;
float* C;
float* C_d;
float **B;
float **B_d;
int **index;
int **index_d;
int* category;
int* active;
int* old_to_new_map;
int* category_d;
int* active_d;
int* old_to_new_map_d;
int this_round_batch = batch;
int layer = weight.size();
A = (float*)malloc(sizeof(float) * neuron * batch);
C = (float*)malloc(sizeof(float) * neuron * batch);
memset(C, 0, sizeof(float) * neuron * batch);
memset(A, 0, sizeof(float) * neuron * batch);
int batch_begin = batch_index * batch;
    int batch_end = std::min((batch_index + 1) * batch, (int)input.size());
for(int l = batch_begin; l < batch_end; ++l) {
for(int i = 0; i < input[l].size(); ++i) {
A[(l - batch_begin) * neuron + i] = input[l][i];
}
}
B = (float**) malloc(sizeof(float*) * weight.size());
B_d = (float**) malloc(sizeof(float*) * weight.size());
for(int l = 0; l < weight.size(); ++l) {
    B[l] = (float*) malloc(sizeof(float) * weight[l].size());
for(int i = 0; i < weight[l].size(); ++i) {
B[l][i] = weight[l][i];
}
}
index = (int**) malloc(sizeof(int*) * row_access.size());
index_d = (int**) malloc(sizeof(int*) * row_access.size());
for(int l = 0; l < row_access.size(); ++l) {
    index[l] = (int*) malloc(sizeof(int) * row_access[l].size());
for(int i = 0; i < row_access[l].size(); ++i) {
index[l][i] = row_access[l][i];
}
}
    category = (int*) malloc(sizeof(int) * batch);
    for(int i = 0; i < batch; ++i) {
        category[i] = i;
    }
    old_to_new_map = (int*) malloc(sizeof(int) * batch);
    for(int i = 0; i < batch; ++i) {
        old_to_new_map[i] = i;
    }
    active = (int*) malloc(sizeof(int) * batch);
for(int i = 0; i < batch; ++i){
active[i] = 0;
}
Safe_Call(hipMalloc((void**)&A_d, sizeof(float) * neuron * batch));
Safe_Call(hipMemcpy(A_d, A, sizeof(float) * neuron * batch, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&A_T, sizeof(float) * neuron * batch));
Safe_Call(hipMemset(A_T, 0, sizeof(float) * neuron * batch));
Safe_Call(hipMalloc((void**)&C_d, sizeof(float) * neuron * batch));
Safe_Call(hipMemset(C_d, 0, sizeof(float) * neuron * batch));
Safe_Call(hipMalloc((void**)&active_d, sizeof(int) * batch));
Safe_Call(hipMalloc((void**)&category_d, sizeof(int) * batch));
Safe_Call(hipMalloc((void**)&old_to_new_map_d, sizeof(int) * batch));
for(int l = 0; l < layer; ++l) {
Safe_Call(hipMalloc((void**)&(B_d[l]), sizeof(float) * weight[l].size()));
Safe_Call(hipMemcpy(B_d[l], B[l], sizeof(float) * weight[l].size(), hipMemcpyHostToDevice));
        Safe_Call(hipMalloc((void**)&(index_d[l]), sizeof(int) * row_access[l].size()));
        Safe_Call(hipMemcpy(index_d[l], index[l], sizeof(int) * row_access[l].size(), hipMemcpyHostToDevice));
}
float all_time = 0;
float all_time_min = 0;
std::map<int, int> neuron_map = {
{1024, 6},
{4096, 8},
{16384, 10}
};
std::map<int, int> stride_map = {
{1, 16},
{2, 32},
{3, 64},
{4, 128},
{5, 256},
{6, 512},
{7, 1024},
{8, 2048},
{9, 4096},
{10, 8192}
};
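    // neuron_map: how many leading layers (per network width) are handled by the
    // shared-memory kernels above; stride_map: the weight-pattern column stride used
    // for each of those layers (both inferred from how they are used below).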
bool now_transpose = false;
int last_feature = batch;
int transpose_batch = 0;
for(int l = 0; l < layer; ++l) {
double need_trans_data = long(this_round_batch * neuron) / (1024.0);
need_trans_data = need_trans_data / 1024.0 * 8;
need_trans_data = need_trans_data / 1024.0;
double bandwidth = 700;
double min_time = need_trans_data / bandwidth * 1000;
auto stream = env.get_stream(gpu_event);
if(l == 9) {
Safe_Call(hipMemsetAsync(active_d, 0, sizeof(int) * batch, stream));
}
else if(l > 9) {
Safe_Call(hipMemsetAsync(active_d, 0, sizeof(int) * batch, stream));
}
else {
Safe_Call(hipMemsetAsync(active_d, 0, sizeof(int) * batch, stream));
}
env.event_start_record(gpu_event);
if(l == 0) {
int blocksize = 128;
dim3 block(blocksize);
dim3 grid((this_round_batch + MINIBATCH - 1)/ MINIBATCH, (neuron + 112 - 1) / 112);
hipLaunchKernelGGL(( n16384l1_kernel), dim3(grid), dim3(block), sizeof(float) * (MINIBATCH * (128 + 16)), stream,
A_d, B_d[l], C_d, index_d[l], category_d, active_d, this_round_batch, neuron, bias
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("what CUDA Error: %s\n", hipGetErrorString(err));
exit(-1);
}
}
else if(l <= neuron_map[neuron] - 1){
int blocksize = 128;
dim3 block(blocksize);
dim3 grid((this_round_batch + MINIBATCH - 1)/ MINIBATCH, (neuron + blocksize - 1) / blocksize);
int stride = stride_map[l + 1];
std::cout << stride << std::endl;
int load_num = stride > blocksize ? 32 * (blocksize / 16) : stride + 16 * (blocksize / 16);
int shared_size = ((load_num + 31) / 32) * 32;
hipLaunchKernelGGL(( n16384_l2_l11_kernel), dim3(grid), dim3(block), sizeof(float) * (MINIBATCH * shared_size), stream,
A_d, B_d[l], C_d, category_d, active_d, stride, this_round_batch, neuron, bias
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("what CUDA Error: %s\n", hipGetErrorString(err));
exit(-1);
}
}
else {
if(!now_transpose) {
transpose_batch = last_feature;
now_transpose = true;
dim3 grid((neuron + TILE_DIM - 1) / TILE_DIM, (transpose_batch + TILE_DIM - 1) / TILE_DIM);
dim3 block(TILE_DIM, BLOCK_ROWS);
hipLaunchKernelGGL(( matrix_transpose), dim3(grid), dim3(block), sizeof(float) * (TILE_DIM * TILE_DIM + TILE_DIM),
stream,
A_T, A_d, neuron, transpose_batch
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("what CUDA Error: %s\n", hipGetErrorString(err));
exit(-1);
}
}
if(l == 22) {
std::cout << "Begin Delete" << std::endl;
dim3 grid((transpose_batch + TILE_DIM - 1) / TILE_DIM, (neuron + TILE_DIM - 1) / TILE_DIM);
dim3 block(TILE_DIM, BLOCK_ROWS);
hipLaunchKernelGGL(( matrix_re_transpose_and_delete), dim3(grid), dim3(block), sizeof(float) * (TILE_DIM * TILE_DIM + TILE_DIM),
stream,
A_d, A_T, old_to_new_map_d, neuron, transpose_batch
);
Safe_Call(hipStreamSynchronize(stream));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("what CUDA Error: %s\n", hipGetErrorString(err));
exit(-1);
}
dim3 grid2((neuron + TILE_DIM - 1) / TILE_DIM, (this_round_batch + TILE_DIM - 1) / TILE_DIM);
dim3 block2(TILE_DIM, BLOCK_ROWS);
hipLaunchKernelGGL(( matrix_transpose), dim3(grid2), dim3(block2), sizeof(float) * (TILE_DIM * TILE_DIM + TILE_DIM),
stream,
A_T, A_d, neuron, this_round_batch
);
Safe_Call(hipStreamSynchronize(stream));
err = hipGetLastError();
if (err != hipSuccess) {
printf("what CUDA Error: %s\n", hipGetErrorString(err));
exit(-1);
}
transpose_batch = this_round_batch;
}
int blocksize = 256;
dim3 block(blocksize);
dim3 grid((transpose_batch + blocksize - 1) / blocksize, neuron / OUT_CHANNEL);
hipLaunchKernelGGL(( n16384_l11_kernel), dim3(grid), dim3(block), sizeof(float) * (OUT_CHANNEL * 32), stream,
A_T, B_d[l], C_d, index_d[l], active_d, transpose_batch, neuron, bias
);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("what CUDA Error: %s\n", hipGetErrorString(err));
exit(-1);
}
}
if(l > neuron_map[neuron] - 1) {
Safe_Call(hipMemcpyAsync(active, active_d, sizeof(int) * transpose_batch, hipMemcpyDeviceToHost, stream));
}
else {
Safe_Call(hipMemcpyAsync(active, active_d, sizeof(int) * this_round_batch, hipMemcpyDeviceToHost, stream));
}
env.event_stop_record(gpu_event);
Safe_Call(hipStreamSynchronize(stream));
int feature = 0;
if(l <= neuron_map[neuron] - 1) {
for(int k = 0; k < this_round_batch; ++k) {
if(active[k]) {
// category[feature] = category[k];
category[feature] = k;
feature++;
}
}
float* tmp = A_d;
A_d = C_d;
C_d = tmp;
}
else if(l == 21) {
int neg_1 = 0;
int have_v = 0;
for(int k = 0; k < transpose_batch; ++k) {
if(active[k]) {
old_to_new_map[k] = feature;
feature++;
have_v++;
}
else {
old_to_new_map[k] = -1;
neg_1++;
}
}
std::cout << "begin cout : ";
std::cout << neg_1 << ", " << have_v << std::endl;
float* tmp = A_T;
A_T = C_d;
C_d = tmp;
}
else {
for(int k = 0; k < batch; ++k) {
if(active[k]) {
// category[feature] = category[k];
category[feature] = k;
feature++;
}
}
float* tmp = A_T;
A_T = C_d;
C_d = tmp;
}
for(int i = 0; i < batch; ++i){
active[i] = 0;
}
last_feature = this_round_batch;
this_round_batch = feature;
        if(l % 100 == 0 || l == layer - 1)
            std::cout << "[GPU " << gpu_index << "], " << "Layer " << l << ", Batch = "<< feature << std::endl;
        Safe_Call(hipMemcpyAsync(category_d, category, sizeof(int) * feature, hipMemcpyHostToDevice, stream));
        if(l == 21)
            Safe_Call(hipMemcpyAsync(old_to_new_map_d, old_to_new_map, sizeof(int) * transpose_batch, hipMemcpyHostToDevice, stream));
        float time = env.get_event_time(gpu_event); // the event registered for this GPU above
        if(l % 100 == 0 || l == layer - 1)
            std::cout << "Layer "<< l << " exec Time = " << time << ", " << min_time << "ms, Utilization = " << (min_time / time) << std::endl;
all_time += time;
all_time_min += min_time;
}
Safe_Call(hipMemcpy(C, C_d, sizeof(float) * neuron * batch, hipMemcpyDeviceToHost));
std::cout << "[GPU "<< gpu_index << "]" << "Kernel Exec Time [20-uiuc-row-succ] = " << all_time << "ms" <<std::endl;
std::cout << "Kernel Exec Upper Time = " << all_time_min << "ms" <<std::endl;
// CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output, false, true, true);
}
};
|
35fc19428b7bc47b7a5c98bbd575c98877945e0e.cu
|
#include <cuda.h>
#include "../../gpu_lib/header.h"
#include "../../utils/header.h"
#include <cstdio>
#include <cstdlib>
namespace ftxj {
__device__ inline float __ReLU(float x){
return x<0.0?0.0:x>32.0?32.0:x;
};
#define MINIBATCH 8
#define UNROLL 8
__global__ void n16384l1_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ index,
int* categories,
int* active,
int batch,
int neuron,
float bias) {
extern __shared__ float shared[];
int start_idx = index[blockIdx.y];
int col_gropu = threadIdx.x / 16;
int last_load = ((neuron / 16) % 7) * 16 + 16;
int load_num = (blockIdx.y + 1) == gridDim.y ? last_load : 128;
for(int n = threadIdx.x; n < load_num; n += blockDim.x){
for(int f = 0; f < MINIBATCH; ++f) {
shared[f * 128 + n] = A[(blockIdx.x * MINIBATCH + f) * neuron + (start_idx + n) % neuron];
}
}
__syncthreads();
int last_thread = (neuron % 112);
if(col_gropu == 7 || ((blockIdx.y + 1) == gridDim.y && threadIdx.x >= last_thread)) return;
float res[MINIBATCH] = {0.0};
for(int r = 0; r < 32; ++r) {
float val = B[(blockIdx.y * 128 * 32) + r * 128 + threadIdx.x];
int idx = col_gropu * 16 + r;
for(int f = 0; f < MINIBATCH / UNROLL; ++f) {
res[0 + f * UNROLL] += shared[(f * UNROLL + 0) * 128 + idx] * val;
res[1 + f * UNROLL] += shared[(f * UNROLL + 1) * 128 + idx] * val;
res[2 + f * UNROLL] += shared[(f * UNROLL + 2) * 128 + idx] * val;
res[3 + f * UNROLL] += shared[(f * UNROLL + 3) * 128 + idx] * val;
res[4 + f * UNROLL] += shared[(f * UNROLL + 4) * 128 + idx] * val;
res[5 + f * UNROLL] += shared[(f * UNROLL + 5) * 128 + idx] * val;
res[6 + f * UNROLL] += shared[(f * UNROLL + 6) * 128 + idx] * val;
res[7 + f * UNROLL] += shared[(f * UNROLL + 7) * 128 + idx] * val;
}
}
__syncthreads();
for(int f = 0; f < MINIBATCH; ++f) {
// && blockIdx.x * MINIBATCH + f < batch; ++f) {
if(C[(blockIdx.x * MINIBATCH + f) * neuron + blockIdx.y * 112 + threadIdx.x] = __ReLU(res[f] + bias)) {
active[blockIdx.x * MINIBATCH + f] = 1;
}
}
};
__global__ void n16384_l2_l11_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ categories,
int* __restrict__ active,
int stride,
int batch,
int neuron,
float bias) {
extern __shared__ float shared[];
int start_idx1 = (blockDim.x / 16) * (blockIdx.y) * 16;
int start_idx2 = (blockDim.x / 16) * (blockIdx.y) * 16 + stride;
int load_num = stride > blockDim.x ? 32 * (blockDim.x / 16) : stride + 16 * (blockDim.x / 16);
int shared_size = ((load_num + 31) / 32) * 32;
int col_gropu = threadIdx.x / 16;
for(int n = threadIdx.x; n < load_num * MINIBATCH; n += blockDim.x){
int f = n / load_num;
int k = n % load_num;
int a_k = ((stride > blockDim.x) && (k >= blockDim.x)) ? (k - blockDim.x) + start_idx2 : k + start_idx1;
shared[f * shared_size + k] = A[categories[(blockIdx.x * MINIBATCH + f)] * neuron + (a_k) % neuron];
}
__syncthreads();
int gap = stride >= blockDim.x ? blockDim.x : stride;
float res[MINIBATCH] = {0.0};
for(int r = 0; r < 32; ++r) {
float val = B[(blockIdx.y * blockDim.x * 32) + r * blockDim.x + threadIdx.x];
int idx = col_gropu * 16 + (r >= 16? r + gap - 16 : r);
for(int f = 0; f < MINIBATCH / UNROLL; ++f) {
res[0 + f * UNROLL] += shared[(f * UNROLL + 0) * shared_size + idx] * val;
res[1 + f * UNROLL] += shared[(f * UNROLL + 1) * shared_size + idx] * val;
res[2 + f * UNROLL] += shared[(f * UNROLL + 2) * shared_size + idx] * val;
res[3 + f * UNROLL] += shared[(f * UNROLL + 3) * shared_size + idx] * val;
res[4 + f * UNROLL] += shared[(f * UNROLL + 4) * shared_size + idx] * val;
res[5 + f * UNROLL] += shared[(f * UNROLL + 5) * shared_size + idx] * val;
res[6 + f * UNROLL] += shared[(f * UNROLL + 6) * shared_size + idx] * val;
res[7 + f * UNROLL] += shared[(f * UNROLL + 7) * shared_size + idx] * val;
}
}
for(int f = 0; f < MINIBATCH ; ++f) {
if(C[(blockIdx.x * MINIBATCH + f) * neuron + blockIdx.y * blockDim.x + threadIdx.x] = __ReLU(res[f] + bias)) {
active[blockIdx.x * MINIBATCH + f] = 1;
}
}
};
#define OUT_CHANNEL 16
__global__ void n16384_l11_kernel(
float * __restrict__ A,
float * __restrict__ B,
float * __restrict__ C,
int* __restrict__ index,
int* __restrict__ active,
int batch,
int neuron,
float bias
) {
extern __shared__ float shared[];
for(int n = threadIdx.x; n < OUT_CHANNEL * 32; n += blockDim.x){
shared[n] = B[(blockIdx.y * OUT_CHANNEL * 32) + n];
}
__syncthreads();
if((blockIdx.x * blockDim.x + threadIdx.x) >= batch) return;
int begin_idx = blockIdx.y * OUT_CHANNEL / 16 * 32;
for(int o_r = 0; o_r < OUT_CHANNEL / 16; ++o_r) {
float reduce[16] = {0.0};
int idx = begin_idx + o_r * 32;
for(int r = 0; r < 32; ++r) {
int row_idx = index[idx + r];
float val = A[row_idx * batch + blockIdx.x * blockDim.x + threadIdx.x];
for(int c = 0; c < 16; ++c) {
reduce[c] += val * shared[o_r * 32 * 16 + r * 16 + c];
}
}
for(int c = 0; c < 16; ++c) {
if(C[(blockIdx.y * OUT_CHANNEL + o_r * 16 + c) * batch + blockIdx.x * blockDim.x + threadIdx.x]
= __ReLU(reduce[c] + bias)) {
active[blockIdx.x * blockDim.x + threadIdx.x] = 1;
}
}
}
};
#define TILE_DIM 32
#define BLOCK_ROWS 8
__global__ void matrix_transpose(float * __restrict__ odata, float * __restrict__ idata, int neuron, int batch) {
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM && (y + j) < batch && x < neuron; j += BLOCK_ROWS) {
tile[(threadIdx.y + j)][threadIdx.x] = idata[(y + j) * neuron + x];
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM && x < batch && y + j < neuron; j += BLOCK_ROWS) {
odata[(y+j) * batch + x] = tile[threadIdx.x][threadIdx.y + j];
}
};
__global__ void matrix_re_transpose_and_delete(
float * __restrict__ odata,
float * __restrict__ idata,
int * __restrict__ old_to_new_map,
int neuron, int batch) {
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM && x < batch; j += BLOCK_ROWS) {
tile[(threadIdx.y + j)][threadIdx.x] = idata[(y + j) * batch + x];
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // old row
y = blockIdx.x * TILE_DIM + threadIdx.y; // old batch
for (int j = 0; j < TILE_DIM && (y+j) < batch; j += BLOCK_ROWS) {
if(old_to_new_map[y + j] == -1) continue;
int tmp = old_to_new_map[y + j]; // new batch
odata[tmp * neuron + x] = tile[threadIdx.x][threadIdx.y + j];
}
};
void test_benchmark_multi_gpu_graph_challenge(
std::vector<std::vector<float>> &input,
std::vector<std::vector<float>> &weight,
std::vector<std::vector<int>> &row_access,
int batch,
int neuron,
float bias,
int gpu_index,
int batch_index
) {
GpuEnv env(gpu_index);
std::string gpu_event = "gpu_" + std::to_string(gpu_index) + "_kernel";
env.add_event(gpu_event);
std::cout << "[GPU" << gpu_index << "]......" << std::endl;
float* A; // batch parallel, cpu data
float* A_T;
float* A_d;
float* C;
float* C_d;
float **B;
float **B_d;
int **index;
int **index_d;
int* category;
int* active;
int* old_to_new_map;
int* category_d;
int* active_d;
int* old_to_new_map_d;
int this_round_batch = batch;
int layer = weight.size();
A = (float*)malloc(sizeof(float) * neuron * batch);
C = (float*)malloc(sizeof(float) * neuron * batch);
memset(C, 0, sizeof(float) * neuron * batch);
memset(A, 0, sizeof(float) * neuron * batch);
int batch_begin = batch_index * batch;
int batch_end = std::min((batch_index + 1) * batch, (int)input.size());
for(int l = batch_begin; l < batch_end; ++l) {
for(int i = 0; i < input[l].size(); ++i) {
A[(l - batch_begin) * neuron + i] = input[l][i];
}
}
B = (float**) malloc(sizeof(float*) * weight.size());
B_d = (float**) malloc(sizeof(float*) * weight.size());
for(int l = 0; l < weight.size(); ++l) {
    B[l] = (float*) malloc(sizeof(float) * weight[l].size());
for(int i = 0; i < weight[l].size(); ++i) {
B[l][i] = weight[l][i];
}
}
index = (int**) malloc(sizeof(int*) * row_access.size());
index_d = (int**) malloc(sizeof(int*) * row_access.size());
for(int l = 0; l < row_access.size(); ++l) {
    index[l] = (int*) malloc(sizeof(int) * row_access[l].size());
for(int i = 0; i < row_access[l].size(); ++i) {
index[l][i] = row_access[l][i];
}
}
    category = (int*) malloc(sizeof(int) * batch);
    for(int i = 0; i < batch; ++i) {
        category[i] = i;
    }
    old_to_new_map = (int*) malloc(sizeof(int) * batch);
    for(int i = 0; i < batch; ++i) {
        old_to_new_map[i] = i;
    }
    active = (int*) malloc(sizeof(int) * batch);
for(int i = 0; i < batch; ++i){
active[i] = 0;
}
Safe_Call(cudaMalloc((void**)&A_d, sizeof(float) * neuron * batch));
Safe_Call(cudaMemcpy(A_d, A, sizeof(float) * neuron * batch, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&A_T, sizeof(float) * neuron * batch));
Safe_Call(cudaMemset(A_T, 0, sizeof(float) * neuron * batch));
Safe_Call(cudaMalloc((void**)&C_d, sizeof(float) * neuron * batch));
Safe_Call(cudaMemset(C_d, 0, sizeof(float) * neuron * batch));
Safe_Call(cudaMalloc((void**)&active_d, sizeof(int) * batch));
Safe_Call(cudaMalloc((void**)&category_d, sizeof(int) * batch));
Safe_Call(cudaMalloc((void**)&old_to_new_map_d, sizeof(int) * batch));
for(int l = 0; l < layer; ++l) {
Safe_Call(cudaMalloc((void**)&(B_d[l]), sizeof(float) * weight[l].size()));
Safe_Call(cudaMemcpy(B_d[l], B[l], sizeof(float) * weight[l].size(), cudaMemcpyHostToDevice));
        Safe_Call(cudaMalloc((void**)&(index_d[l]), sizeof(int) * row_access[l].size()));
        Safe_Call(cudaMemcpy(index_d[l], index[l], sizeof(int) * row_access[l].size(), cudaMemcpyHostToDevice));
}
float all_time = 0;
float all_time_min = 0;
std::map<int, int> neuron_map = {
{1024, 6},
{4096, 8},
{16384, 10}
};
std::map<int, int> stride_map = {
{1, 16},
{2, 32},
{3, 64},
{4, 128},
{5, 256},
{6, 512},
{7, 1024},
{8, 2048},
{9, 4096},
{10, 8192}
};
bool now_transpose = false;
int last_feature = batch;
int transpose_batch = 0;
for(int l = 0; l < layer; ++l) {
double need_trans_data = long(this_round_batch * neuron) / (1024.0);
need_trans_data = need_trans_data / 1024.0 * 8;
need_trans_data = need_trans_data / 1024.0;
double bandwidth = 700;
double min_time = need_trans_data / bandwidth * 1000;
auto stream = env.get_stream(gpu_event);
if(l == 9) {
Safe_Call(cudaMemsetAsync(active_d, 0, sizeof(int) * batch, stream));
}
else if(l > 9) {
Safe_Call(cudaMemsetAsync(active_d, 0, sizeof(int) * batch, stream));
}
else {
Safe_Call(cudaMemsetAsync(active_d, 0, sizeof(int) * batch, stream));
}
env.event_start_record(gpu_event);
if(l == 0) {
int blocksize = 128;
dim3 block(blocksize);
dim3 grid((this_round_batch + MINIBATCH - 1)/ MINIBATCH, (neuron + 112 - 1) / 112);
n16384l1_kernel<<<grid, block, sizeof(float) * (MINIBATCH * (128 + 16)), stream>>>(
A_d, B_d[l], C_d, index_d[l], category_d, active_d, this_round_batch, neuron, bias
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("what CUDA Error: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
else if(l <= neuron_map[neuron] - 1){
int blocksize = 128;
dim3 block(blocksize);
dim3 grid((this_round_batch + MINIBATCH - 1)/ MINIBATCH, (neuron + blocksize - 1) / blocksize);
int stride = stride_map[l + 1];
std::cout << stride << std::endl;
int load_num = stride > blocksize ? 32 * (blocksize / 16) : stride + 16 * (blocksize / 16);
int shared_size = ((load_num + 31) / 32) * 32;
n16384_l2_l11_kernel<<<grid, block, sizeof(float) * (MINIBATCH * shared_size), stream>>>(
A_d, B_d[l], C_d, category_d, active_d, stride, this_round_batch, neuron, bias
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("what CUDA Error: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
else {
if(!now_transpose) {
transpose_batch = last_feature;
now_transpose = true;
dim3 grid((neuron + TILE_DIM - 1) / TILE_DIM, (transpose_batch + TILE_DIM - 1) / TILE_DIM);
dim3 block(TILE_DIM, BLOCK_ROWS);
matrix_transpose<<<grid, block, sizeof(float) * (TILE_DIM * TILE_DIM + TILE_DIM),
stream>>>(
A_T, A_d, neuron, transpose_batch
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("what CUDA Error: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
if(l == 22) {
std::cout << "Begin Delete" << std::endl;
dim3 grid((transpose_batch + TILE_DIM - 1) / TILE_DIM, (neuron + TILE_DIM - 1) / TILE_DIM);
dim3 block(TILE_DIM, BLOCK_ROWS);
matrix_re_transpose_and_delete<<<grid, block, sizeof(float) * (TILE_DIM * TILE_DIM + TILE_DIM),
stream>>>(
A_d, A_T, old_to_new_map_d, neuron, transpose_batch
);
Safe_Call(cudaStreamSynchronize(stream));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("what CUDA Error: %s\n", cudaGetErrorString(err));
exit(-1);
}
dim3 grid2((neuron + TILE_DIM - 1) / TILE_DIM, (this_round_batch + TILE_DIM - 1) / TILE_DIM);
dim3 block2(TILE_DIM, BLOCK_ROWS);
matrix_transpose<<<grid2, block2, sizeof(float) * (TILE_DIM * TILE_DIM + TILE_DIM),
stream>>>(
A_T, A_d, neuron, this_round_batch
);
Safe_Call(cudaStreamSynchronize(stream));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("what CUDA Error: %s\n", cudaGetErrorString(err));
exit(-1);
}
transpose_batch = this_round_batch;
}
int blocksize = 256;
dim3 block(blocksize);
dim3 grid((transpose_batch + blocksize - 1) / blocksize, neuron / OUT_CHANNEL);
n16384_l11_kernel<<<grid, block, sizeof(float) * (OUT_CHANNEL * 32), stream>>>(
A_T, B_d[l], C_d, index_d[l], active_d, transpose_batch, neuron, bias
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("what CUDA Error: %s\n", cudaGetErrorString(err));
exit(-1);
}
}
if(l > neuron_map[neuron] - 1) {
Safe_Call(cudaMemcpyAsync(active, active_d, sizeof(int) * transpose_batch, cudaMemcpyDeviceToHost, stream));
}
else {
Safe_Call(cudaMemcpyAsync(active, active_d, sizeof(int) * this_round_batch, cudaMemcpyDeviceToHost, stream));
}
env.event_stop_record(gpu_event);
Safe_Call(cudaStreamSynchronize(stream));
int feature = 0;
if(l <= neuron_map[neuron] - 1) {
for(int k = 0; k < this_round_batch; ++k) {
if(active[k]) {
// category[feature] = category[k];
category[feature] = k;
feature++;
}
}
float* tmp = A_d;
A_d = C_d;
C_d = tmp;
}
else if(l == 21) {
int neg_1 = 0;
int have_v = 0;
for(int k = 0; k < transpose_batch; ++k) {
if(active[k]) {
old_to_new_map[k] = feature;
feature++;
have_v++;
}
else {
old_to_new_map[k] = -1;
neg_1++;
}
}
std::cout << "begin cout : ";
std::cout << neg_1 << ", " << have_v << std::endl;
float* tmp = A_T;
A_T = C_d;
C_d = tmp;
}
else {
for(int k = 0; k < batch; ++k) {
if(active[k]) {
// category[feature] = category[k];
category[feature] = k;
feature++;
}
}
float* tmp = A_T;
A_T = C_d;
C_d = tmp;
}
for(int i = 0; i < batch; ++i){
active[i] = 0;
}
last_feature = this_round_batch;
this_round_batch = feature;
        if(l % 100 == 0 || l == layer - 1)
            std::cout << "[GPU " << gpu_index << "], " << "Layer " << l << ", Batch = "<< feature << std::endl;
        Safe_Call(cudaMemcpyAsync(category_d, category, sizeof(int) * feature, cudaMemcpyHostToDevice, stream));
        if(l == 21)
            Safe_Call(cudaMemcpyAsync(old_to_new_map_d, old_to_new_map, sizeof(int) * transpose_batch, cudaMemcpyHostToDevice, stream));
        float time = env.get_event_time(gpu_event); // the event registered for this GPU above
        if(l % 100 == 0 || l == layer - 1)
            std::cout << "Layer "<< l << " exec Time = " << time << ", " << min_time << "ms, Utilization = " << (min_time / time) << std::endl;
all_time += time;
all_time_min += min_time;
}
Safe_Call(cudaMemcpy(C, C_d, sizeof(float) * neuron * batch, cudaMemcpyDeviceToHost));
std::cout << "[GPU "<< gpu_index << "]" << "Kernel Exec Time [20-uiuc-row-succ] = " << all_time << "ms" <<std::endl;
std::cout << "Kernel Exec Upper Time = " << all_time_min << "ms" <<std::endl;
// CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output, false, true, true);
}
};
|
18ae9146c8d1d6840be63dd6a5c7c2c060b6aadb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_relu (const int sd, const int unit, const int bottom, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
const REAL val = a[offset_a + gid_0 + gid_1 * ld_a];
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(fmax)(val, alpha * val);
}
}
|
18ae9146c8d1d6840be63dd6a5c7c2c060b6aadb.cu
|
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_relu (const int sd, const int unit, const int bottom, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
const REAL val = a[offset_a + gid_0 + gid_1 * ld_a];
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(fmax)(val, alpha * val);
}
}
|
0cf935ba42c03849f5e10559d5db7c2970e5990b.hip
|
// !!! This is a file automatically generated by hipify!!!
/**********key*************/
/*********************/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <hip/hip_runtime.h>
#include <iomanip>
#include <time.h>
#define BYTE unsigned char
using namespace std;
class aes_block
{
public:
BYTE block[16];
};
void printBytes(BYTE b[], int len) {
int i;
for (i=0; i<len; i++)
printf("%x ", b[i]);
printf("\n");
}
void f1printBytes(BYTE b[], int len, FILE* fp) {
int i;
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
for (i=0; i<len; i++)
fprintf(fp, "%02x ", b[shiftTab[i]]);
fprintf(fp, "\n");
}
int flag=0;
void f2printBytes(BYTE b[], int len, FILE* fp) {
int i;
for (i=0; i<len; i++){
fprintf(fp, "%c", b[i]);
if(b[i]=='\n')
flag++;
}
}
void f3printBytes(BYTE b[], int len, FILE* fp) {
int i;
for (i=0; i<len; i++){
if(b[i]=='\0')
return ;
fprintf(fp, "%c", b[i]);
if(b[i]=='\n')
flag++;
}
}
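// Helper summary: printBytes dumps hex to stdout; f1printBytes writes hex to a file with the
// bytes re-ordered through shiftTab; f2printBytes/f3printBytes write raw characters to a file
// (f3 stops at a NUL byte) and both count newlines in the global 'flag'.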
BYTE AES_Sbox[] =
{ /*0 1 2 3 4 5 6 7 8 9 a b c d e f */
0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, /*0*/
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, /*1*/
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, /*2*/
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, /*3*/
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, /*4*/
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, /*5*/
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, /*6*/
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, /*7*/
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, /*8*/
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, /*9*/
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, /*a*/
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, /*b*/
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, /*c*/
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, /*d*/
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, /*e*/
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 /*f*/
};
__device__ void AES_SubBytes(BYTE state[], BYTE sbox[]) {
int i;
for(i = 0; i < 16; i++)
state[i] = sbox[state[i]];
}
__device__ void AES_AddRoundKey(BYTE state[], BYTE rkey[]) {
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
int i;
for(i = 0; i < 16; i++)
state[i] ^= rkey[shiftTab[i]];
}
__device__ void AES_ShiftRows(BYTE state[], BYTE shifttab[]) {
BYTE h[16];
memcpy(h, state, 16);
int i;
for(i = 0; i < 16; i++)
state[i] = h[shifttab[i]];
}
__device__ void AES_MixColumns(BYTE state[], BYTE AES_xtime[]) {
int i;
for(i = 0; i < 4; i += 1) {
BYTE s0 = state[i + 0], s1 = state[i + 4];
BYTE s2 = state[i + 8], s3 = state[i + 12];
BYTE h = s0 ^ s1 ^ s2 ^ s3;
state[i + 0] ^= h ^ AES_xtime[s0 ^ s1];
state[i + 4] ^= h ^ AES_xtime[s1 ^ s2];
state[i + 8] ^= h ^ AES_xtime[s2 ^ s3];
state[i + 12] ^= h ^ AES_xtime[s3 ^ s0];
}
}
__device__ void AES_MixColumns_Inv(BYTE state[], BYTE AES_xtime[]) {
int i;
for(i = 0; i < 4; i += 1) {
BYTE s0 = state[i + 0], s1 = state[i + 4];
BYTE s2 = state[i + 8], s3 = state[i + 12];
BYTE h = s0 ^ s1 ^ s2 ^ s3;
BYTE xh = AES_xtime[h];
BYTE h1 = AES_xtime[AES_xtime[xh ^ s0 ^ s2]] ^ h;
BYTE h2 = AES_xtime[AES_xtime[xh ^ s1 ^ s3]] ^ h;
state[i + 0] ^= h1 ^ AES_xtime[s0 ^ s1];
state[i + 4] ^= h2 ^ AES_xtime[s1 ^ s2];
state[i + 8] ^= h1 ^ AES_xtime[s2 ^ s3];
state[i + 12] ^= h2 ^ AES_xtime[s3 ^ s0];
}
}
__device__ void AES_Init(BYTE AES_Sbox[], BYTE AES_ShiftRowTab[], BYTE AES_Sbox_Inv[], BYTE AES_xtime[], BYTE AES_ShiftRowTab_Inv[], BYTE AES_key[] ,BYTE key[]) {
AES_ShiftRowTab[0]=0;
AES_ShiftRowTab[1]=1;
AES_ShiftRowTab[2]=2;
AES_ShiftRowTab[3]=3;
AES_ShiftRowTab[4]=5;
AES_ShiftRowTab[5]=6;
AES_ShiftRowTab[6]=7;
AES_ShiftRowTab[7]=4;
AES_ShiftRowTab[8]=10;
AES_ShiftRowTab[9]=11;
AES_ShiftRowTab[10]=8;
AES_ShiftRowTab[11]=9;
AES_ShiftRowTab[12]=15;
AES_ShiftRowTab[13]=12;
AES_ShiftRowTab[14]=13;
AES_ShiftRowTab[15]=14;
AES_Sbox[0] = 0x63;AES_Sbox[1] = 0x7c;AES_Sbox[2] = 0x77;AES_Sbox[3] = 0x7b;AES_Sbox[4] = 0xf2;AES_Sbox[5] = 0x6b;AES_Sbox[6] = 0x6f;AES_Sbox[7] = 0xc5;AES_Sbox[8] = 0x30;AES_Sbox[9] = 0x1;AES_Sbox[10] = 0x67;AES_Sbox[11] = 0x2b;AES_Sbox[12] = 0xfe;AES_Sbox[13] = 0xd7;AES_Sbox[14] = 0xab;AES_Sbox[15] = 0x76;
AES_Sbox[16] = 0xca;AES_Sbox[17] = 0x82;AES_Sbox[18] = 0xc9;AES_Sbox[19] = 0x7d;AES_Sbox[20] = 0xfa;AES_Sbox[21] = 0x59;AES_Sbox[22] = 0x47;AES_Sbox[23] = 0xf0;AES_Sbox[24] = 0xad;AES_Sbox[25] = 0xd4;AES_Sbox[26] = 0xa2;AES_Sbox[27] = 0xaf;AES_Sbox[28] = 0x9c;AES_Sbox[29] = 0xa4;AES_Sbox[30] = 0x72;AES_Sbox[31] = 0xc0;
AES_Sbox[32] = 0xb7;AES_Sbox[33] = 0xfd;AES_Sbox[34] = 0x93;AES_Sbox[35] = 0x26;AES_Sbox[36] = 0x36;AES_Sbox[37] = 0x3f;AES_Sbox[38] = 0xf7;AES_Sbox[39] = 0xcc;AES_Sbox[40] = 0x34;AES_Sbox[41] = 0xa5;AES_Sbox[42] = 0xe5;AES_Sbox[43] = 0xf1;AES_Sbox[44] = 0x71;AES_Sbox[45] = 0xd8;AES_Sbox[46] = 0x31;AES_Sbox[47] = 0x15;
AES_Sbox[48] = 0x4;AES_Sbox[49] = 0xc7;AES_Sbox[50] = 0x23;AES_Sbox[51] = 0xc3;AES_Sbox[52] = 0x18;AES_Sbox[53] = 0x96;AES_Sbox[54] = 0x5;AES_Sbox[55] = 0x9a;AES_Sbox[56] = 0x7;AES_Sbox[57] = 0x12;AES_Sbox[58] = 0x80;AES_Sbox[59] = 0xe2;AES_Sbox[60] = 0xeb;AES_Sbox[61] = 0x27;AES_Sbox[62] = 0xb2;AES_Sbox[63] = 0x75;
AES_Sbox[64] = 0x9;AES_Sbox[65] = 0x83;AES_Sbox[66] = 0x2c;AES_Sbox[67] = 0x1a;AES_Sbox[68] = 0x1b;AES_Sbox[69] = 0x6e;AES_Sbox[70] = 0x5a;AES_Sbox[71] = 0xa0;AES_Sbox[72] = 0x52;AES_Sbox[73] = 0x3b;AES_Sbox[74] = 0xd6;AES_Sbox[75] = 0xb3;AES_Sbox[76] = 0x29;AES_Sbox[77] = 0xe3;AES_Sbox[78] = 0x2f;AES_Sbox[79] = 0x84;
AES_Sbox[80] = 0x53;AES_Sbox[81] = 0xd1;AES_Sbox[82] = 0x0;AES_Sbox[83] = 0xed;AES_Sbox[84] = 0x20;AES_Sbox[85] = 0xfc;AES_Sbox[86] = 0xb1;AES_Sbox[87] = 0x5b;AES_Sbox[88] = 0x6a;AES_Sbox[89] = 0xcb;AES_Sbox[90] = 0xbe;AES_Sbox[91] = 0x39;AES_Sbox[92] = 0x4a;AES_Sbox[93] = 0x4c;AES_Sbox[94] = 0x58;AES_Sbox[95] = 0xcf;
AES_Sbox[96] = 0xd0;AES_Sbox[97] = 0xef;AES_Sbox[98] = 0xaa;AES_Sbox[99] = 0xfb;AES_Sbox[100] = 0x43;AES_Sbox[101] = 0x4d;AES_Sbox[102] = 0x33;AES_Sbox[103] = 0x85;AES_Sbox[104] = 0x45;AES_Sbox[105] = 0xf9;AES_Sbox[106] = 0x2;AES_Sbox[107] = 0x7f;AES_Sbox[108] = 0x50;AES_Sbox[109] = 0x3c;AES_Sbox[110] = 0x9f;AES_Sbox[111] = 0xa8;
AES_Sbox[112] = 0x51;AES_Sbox[113] = 0xa3;AES_Sbox[114] = 0x40;AES_Sbox[115] = 0x8f;AES_Sbox[116] = 0x92;AES_Sbox[117] = 0x9d;AES_Sbox[118] = 0x38;AES_Sbox[119] = 0xf5;AES_Sbox[120] = 0xbc;AES_Sbox[121] = 0xb6;AES_Sbox[122] = 0xda;AES_Sbox[123] = 0x21;AES_Sbox[124] = 0x10;AES_Sbox[125] = 0xff;AES_Sbox[126] = 0xf3;AES_Sbox[127] = 0xd2;
AES_Sbox[128] = 0xcd;AES_Sbox[129] = 0xc;AES_Sbox[130] = 0x13;AES_Sbox[131] = 0xec;AES_Sbox[132] = 0x5f;AES_Sbox[133] = 0x97;AES_Sbox[134] = 0x44;AES_Sbox[135] = 0x17;AES_Sbox[136] = 0xc4;AES_Sbox[137] = 0xa7;AES_Sbox[138] = 0x7e;AES_Sbox[139] = 0x3d;AES_Sbox[140] = 0x64;AES_Sbox[141] = 0x5d;AES_Sbox[142] = 0x19;AES_Sbox[143] = 0x73;
AES_Sbox[144] = 0x60;AES_Sbox[145] = 0x81;AES_Sbox[146] = 0x4f;AES_Sbox[147] = 0xdc;AES_Sbox[148] = 0x22;AES_Sbox[149] = 0x2a;AES_Sbox[150] = 0x90;AES_Sbox[151] = 0x88;AES_Sbox[152] = 0x46;AES_Sbox[153] = 0xee;AES_Sbox[154] = 0xb8;AES_Sbox[155] = 0x14;AES_Sbox[156] = 0xde;AES_Sbox[157] = 0x5e;AES_Sbox[158] = 0xb;AES_Sbox[159] = 0xdb;
AES_Sbox[160] = 0xe0;AES_Sbox[161] = 0x32;AES_Sbox[162] = 0x3a;AES_Sbox[163] = 0xa;AES_Sbox[164] = 0x49;AES_Sbox[165] = 0x6;AES_Sbox[166] = 0x24;AES_Sbox[167] = 0x5c;AES_Sbox[168] = 0xc2;AES_Sbox[169] = 0xd3;AES_Sbox[170] = 0xac;AES_Sbox[171] = 0x62;AES_Sbox[172] = 0x91;AES_Sbox[173] = 0x95;AES_Sbox[174] = 0xe4;AES_Sbox[175] = 0x79;
AES_Sbox[176] = 0xe7;AES_Sbox[177] = 0xc8;AES_Sbox[178] = 0x37;AES_Sbox[179] = 0x6d;AES_Sbox[180] = 0x8d;AES_Sbox[181] = 0xd5;AES_Sbox[182] = 0x4e;AES_Sbox[183] = 0xa9;AES_Sbox[184] = 0x6c;AES_Sbox[185] = 0x56;AES_Sbox[186] = 0xf4;AES_Sbox[187] = 0xea;AES_Sbox[188] = 0x65;AES_Sbox[189] = 0x7a;AES_Sbox[190] = 0xae;AES_Sbox[191] = 0x8;
AES_Sbox[192] = 0xba;AES_Sbox[193] = 0x78;AES_Sbox[194] = 0x25;AES_Sbox[195] = 0x2e;AES_Sbox[196] = 0x1c;AES_Sbox[197] = 0xa6;AES_Sbox[198] = 0xb4;AES_Sbox[199] = 0xc6;AES_Sbox[200] = 0xe8;AES_Sbox[201] = 0xdd;AES_Sbox[202] = 0x74;AES_Sbox[203] = 0x1f;AES_Sbox[204] = 0x4b;AES_Sbox[205] = 0xbd;AES_Sbox[206] = 0x8b;AES_Sbox[207] = 0x8a;
AES_Sbox[208] = 0x70;AES_Sbox[209] = 0x3e;AES_Sbox[210] = 0xb5;AES_Sbox[211] = 0x66;AES_Sbox[212] = 0x48;AES_Sbox[213] = 0x3;AES_Sbox[214] = 0xf6;AES_Sbox[215] = 0xe;AES_Sbox[216] = 0x61;AES_Sbox[217] = 0x35;AES_Sbox[218] = 0x57;AES_Sbox[219] = 0xb9;AES_Sbox[220] = 0x86;AES_Sbox[221] = 0xc1;AES_Sbox[222] = 0x1d;AES_Sbox[223] = 0x9e;
AES_Sbox[224] = 0xe1;AES_Sbox[225] = 0xf8;AES_Sbox[226] = 0x98;AES_Sbox[227] = 0x11;AES_Sbox[228] = 0x69;AES_Sbox[229] = 0xd9;AES_Sbox[230] = 0x8e;AES_Sbox[231] = 0x94;AES_Sbox[232] = 0x9b;AES_Sbox[233] = 0x1e;AES_Sbox[234] = 0x87;AES_Sbox[235] = 0xe9;AES_Sbox[236] = 0xce;AES_Sbox[237] = 0x55;AES_Sbox[238] = 0x28;AES_Sbox[239] = 0xdf;
AES_Sbox[240] = 0x8c;AES_Sbox[241] = 0xa1;AES_Sbox[242] = 0x89;AES_Sbox[243] = 0xd;AES_Sbox[244] = 0xbf;AES_Sbox[245] = 0xe6;AES_Sbox[246] = 0x42;AES_Sbox[247] = 0x68;AES_Sbox[248] = 0x41;AES_Sbox[249] = 0x99;AES_Sbox[250] = 0x2d;AES_Sbox[251] = 0xf;AES_Sbox[252] = 0xb0;AES_Sbox[253] = 0x54;AES_Sbox[254] = 0xbb; AES_Sbox[255] = 0x16;
int i;
for(i = 0; i < 256; i++){
AES_Sbox_Inv[AES_Sbox[i]] = i;
}
for(i = 0; i < 16; i++)
AES_ShiftRowTab_Inv[AES_ShiftRowTab[i]] = i;
for(i = 0; i < 128; i++) {
AES_xtime[i] = i << 1;
AES_xtime[128 + i] = (i << 1) ^ 0x1b;
}
for(i=0; i<176; i++)
AES_key[i]=key[i];
}
void AES_ExpandKey(BYTE key[]) {
int kl = 16, ks=176, Rcon = 1, i, j;
BYTE temp[4], temp2[4];
for(i = kl; i < ks; i += 4) {
memcpy(temp, &key[i-4], 4);
if (i % kl == 0) {
temp2[0] = AES_Sbox[temp[1]] ^ Rcon;
temp2[1] = AES_Sbox[temp[2]];
temp2[2] = AES_Sbox[temp[3]];
temp2[3] = AES_Sbox[temp[0]];
memcpy(temp, temp2, 4);
if ((Rcon <<= 1) >= 256)
Rcon ^= 0x11b;
}
else if ((kl > 24) && (i % kl == 16)) {
temp2[0] = AES_Sbox[temp[0]];
temp2[1] = AES_Sbox[temp[1]];
temp2[2] = AES_Sbox[temp[2]];
temp2[3] = AES_Sbox[temp[3]];
memcpy(temp, temp2, 4);
}
for(j = 0; j < 4; j++)
key[i + j] = key[i + j - kl] ^ temp[j];
}
}
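// AES encryption kernel (ECB mode): each thread encrypts one independent 16-byte block
// and walks the block array with a grid-stride loop. The S-box, xtime table and the
// 176-byte expanded key live in shared memory. Note that only the AES-128 schedule
// (10 rounds) is implemented, even though main() accepts 192- and 256-bit keys.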
__global__ void AES_Encrypt(aes_block aes_block_array[], BYTE key[],int block_number) {
int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ BYTE AES_ShiftRowTab[16];
__shared__ BYTE AES_Sbox[256];
__shared__ BYTE AES_ShiftRowTab_Inv[16];
__shared__ BYTE AES_Sbox_Inv[256];
__shared__ BYTE AES_xtime[256];
__shared__ BYTE AES_key[176];
int stride=blockDim.x*gridDim.x;
// Build the shared lookup tables and round keys once per block, before the
// grid-stride loop, so every thread in the block reaches __syncthreads() uniformly.
if(threadIdx.x == 0 ){
AES_Init(AES_Sbox, AES_ShiftRowTab, AES_Sbox_Inv, AES_xtime, AES_ShiftRowTab_Inv, AES_key, key);
}
__syncthreads();
for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){
BYTE block[16]; // per-thread copy of the 16-byte state being encrypted
for(int i=0; i<16; i++){
block[i] = aes_block_array[real_thread].block[i];
}
int l = 176, i;
// encryption rounds start here
AES_AddRoundKey(block, &AES_key[0]);
for(i = 16; i < l - 16; i += 16) {
AES_SubBytes(block, AES_Sbox);
AES_ShiftRows(block, AES_ShiftRowTab);
AES_MixColumns(block, AES_xtime);
AES_AddRoundKey(block, &AES_key[i]);
}
AES_SubBytes(block, AES_Sbox);
AES_ShiftRows(block, AES_ShiftRowTab);
AES_AddRoundKey(block, &AES_key[i]);
for(int i=0; i<16; i++){
aes_block_array[real_thread].block[i] = block[i];
}
}
}
int main(int argc, char* argv[]) {
ifstream ifs;
ifs.open(argv[1], ios::binary);
if(!ifs){
cerr<<"Error: cannot open the input file"<<endl;
exit(1);
}
ifs.seekg(0, ios::end);
int infileLength = ifs.tellg();
infileLength-=1;
ifs.seekg(0, ios::beg);
cout<<"Input file length (bytes): "<<infileLength<<endl<<"Number of 16-byte blocks: "<<infileLength/16<<endl;
int block_number = infileLength/16 ;
int number_of_zero_pending = infileLength%16;
aes_block* aes_block_array;
BYTE key[16 * 11]; // expanded AES key (11 round keys of 16 bytes for AES-128)
int keyLen = 0;
int blockLen = 16;
ifstream key_fp;
key_fp.open(argv[2]);
while(key_fp.peek()!=EOF)
{
key_fp>>key[keyLen];
if(key_fp.eof())
break;
keyLen++;
}
cout<<"Key length (bytes): "<<keyLen<<endl;
switch (keyLen)
{
case 16:break;
case 24:break;
case 32:break;
default:printf("Error: the key must be 128, 192 or 256 bits\n"); return 0;
}
AES_ExpandKey(key);
if(number_of_zero_pending != 0)
aes_block_array = new aes_block [ block_number + 1];
else
aes_block_array = new aes_block[ block_number ];
char temp[16];
FILE* en_fp; // encrypted output file
// FILE* de_fp; // decrypted output file
en_fp = fopen(argv[3], "wb");
// de_fp = fopen(argv[4], "wb");
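// Each 16-byte block is loaded through shiftTab, which transposes the bytes into the
// 4x4 state layout that AES_ShiftRows/AES_MixColumns index on the device. The table is
// its own inverse, so f1printBytes applies the same permutation to undo it on output.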
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
for(int i=0; i<block_number; i++){
ifs.read(temp, 16);
for(int j=0; j<16; j++){
aes_block_array[i].block[shiftTab[j]] = (unsigned char)temp[j];
}
}
if(number_of_zero_pending != 0)
{
ifs.read(temp, number_of_zero_pending);
for(int j=0; j<16; j++){
aes_block_array[block_number].block[j] = (unsigned char)temp[j];
}
for(int j=1; j<=16-number_of_zero_pending; j++)
aes_block_array[block_number].block[16-j] = '\0';
block_number++;
}
hipSetDevice(0); // select the GPU device
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
int num_sm = prop.multiProcessorCount;
aes_block *cuda_aes_block_array;
BYTE *cuda_key;
dim3 ThreadperBlock(800);
dim3 BlockperGrid(num_sm);
hipMalloc(&cuda_aes_block_array, block_number*sizeof(class aes_block));
hipMalloc(&cuda_key,16*11*sizeof(BYTE) ); // key[] holds 16*11 bytes, so allocate exactly that much
hipMemcpy(cuda_aes_block_array, aes_block_array, block_number*sizeof(class aes_block), hipMemcpyHostToDevice);
hipMemcpy(cuda_key, key, 16*11*sizeof(BYTE), hipMemcpyHostToDevice); // copy only the 176 expanded-key bytes (16*15 would read past the end of key[])
printf("Number of blocks to encrypt: %d\n", block_number);
hipEvent_t start1;
hipEventCreate(&start1);
hipEvent_t stop1;
hipEventCreate(&stop1);
hipEventRecord(start1, NULL);
hipLaunchKernelGGL(( AES_Encrypt) , dim3(BlockperGrid), dim3(ThreadperBlock), 0, 0, cuda_aes_block_array, cuda_key, block_number);
hipEventRecord(stop1, NULL);
hipEventSynchronize(stop1);
float msecTotal1 = 0.0f,total;
hipEventElapsedTime(&msecTotal1, start1, stop1);
total=msecTotal1/1000;
cout<<"Encryption time (s): "<<total<<endl;
long r=1<<23; // 2^23 blocks per second equals 1 Gbit/s (128 bits per block, 2^30 bits per Gbit)
cout<<"Throughput: "<<block_number/total/r<<" Gbps"<<endl;
hipMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), hipMemcpyDeviceToHost);
for(int i=0; i<block_number; i++)
f1printBytes(aes_block_array[i].block, blockLen, en_fp);
// AES_Decrypt <<< BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, expandKeyLen, block_number);
// hipMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), hipMemcpyDeviceToHost);
// for(int i=0; i<block_number-1; i++){
// f2printBytes(aes_block_array[i].block, blockLen, de_fp);
// }
// if(number_of_zero_pending == 0)
// f2printBytes(aes_block_array[block_number-1].block, blockLen, de_fp);
// else
// f3printBytes(aes_block_array[block_number-1].block, blockLen, de_fp);
// hipFree(cuda_aes_block_array);
// hipFree(cuda_key);
// cout<<"Decryption time (s): "<<(double)(end2-start2)/CLOCKS_PER_SEC<<endl;
// fclose(en_fp);
// fclose(de_fp);
return 0;
}
|
0cf935ba42c03849f5e10559d5db7c2970e5990b.cu
|
/********** key kept in shared memory *************/
/********** threads stride across all blocks ***********/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <cuda.h>
#include <iomanip>
#include <time.h>
#define BYTE unsigned char
using namespace std;
class aes_block
{
public:
BYTE block[16];
};
void printBytes(BYTE b[], int len) {
int i;
for (i=0; i<len; i++)
printf("%x ", b[i]);
printf("\n");
}
void f1printBytes(BYTE b[], int len, FILE* fp) {
int i;
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
for (i=0; i<len; i++)
fprintf(fp, "%02x ", b[shiftTab[i]]);
fprintf(fp, "\n");
}
int flag=0;
void f2printBytes(BYTE b[], int len, FILE* fp) {
int i;
for (i=0; i<len; i++){
fprintf(fp, "%c", b[i]);
if(b[i]=='\n')
flag++;
}
}
void f3printBytes(BYTE b[], int len, FILE* fp) {
int i;
for (i=0; i<len; i++){
if(b[i]=='\0')
return ;
fprintf(fp, "%c", b[i]);
if(b[i]=='\n')
flag++;
}
}
BYTE AES_Sbox[] =
{ /*0 1 2 3 4 5 6 7 8 9 a b c d e f */
0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, /*0*/
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, /*1*/
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, /*2*/
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, /*3*/
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, /*4*/
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, /*5*/
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, /*6*/
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, /*7*/
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, /*8*/
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, /*9*/
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, /*a*/
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, /*b*/
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, /*c*/
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, /*d*/
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, /*e*/
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 /*f*/
};
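// Standard AES S-box, used on the host by AES_ExpandKey(). The device rebuilds the
// same table (plus its inverse and the xtime doubling table) in shared memory via
// AES_Init(), so each thread block performs byte substitution from fast local storage.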
__device__ void AES_SubBytes(BYTE state[], BYTE sbox[]) {
int i;
for(i = 0; i < 16; i++)
state[i] = sbox[state[i]];
}
__device__ void AES_AddRoundKey(BYTE state[], BYTE rkey[]) {
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
int i;
for(i = 0; i < 16; i++)
state[i] ^= rkey[shiftTab[i]];
}
__device__ void AES_ShiftRows(BYTE state[], BYTE shifttab[]) {
BYTE h[16];
memcpy(h, state, 16);
int i;
for(i = 0; i < 16; i++)
state[i] = h[shifttab[i]];
}
__device__ void AES_MixColumns(BYTE state[], BYTE AES_xtime[]) {
int i;
for(i = 0; i < 4; i += 1) {
BYTE s0 = state[i + 0], s1 = state[i + 4];
BYTE s2 = state[i + 8], s3 = state[i + 12];
BYTE h = s0 ^ s1 ^ s2 ^ s3;
state[i + 0] ^= h ^ AES_xtime[s0 ^ s1];
state[i + 4] ^= h ^ AES_xtime[s1 ^ s2];
state[i + 8] ^= h ^ AES_xtime[s2 ^ s3];
state[i + 12] ^= h ^ AES_xtime[s3 ^ s0];
}
}
__device__ void AES_MixColumns_Inv(BYTE state[], BYTE AES_xtime[]) {
int i;
for(i = 0; i < 4; i += 1) {
BYTE s0 = state[i + 0], s1 = state[i + 4];
BYTE s2 = state[i + 8], s3 = state[i + 12];
BYTE h = s0 ^ s1 ^ s2 ^ s3;
BYTE xh = AES_xtime[h];
BYTE h1 = AES_xtime[AES_xtime[xh ^ s0 ^ s2]] ^ h;
BYTE h2 = AES_xtime[AES_xtime[xh ^ s1 ^ s3]] ^ h;
state[i + 0] ^= h1 ^ AES_xtime[s0 ^ s1];
state[i + 4] ^= h2 ^ AES_xtime[s1 ^ s2];
state[i + 8] ^= h1 ^ AES_xtime[s2 ^ s3];
state[i + 12] ^= h2 ^ AES_xtime[s3 ^ s0];
}
}
__device__ void AES_Init(BYTE AES_Sbox[], BYTE AES_ShiftRowTab[], BYTE AES_Sbox_Inv[], BYTE AES_xtime[], BYTE AES_ShiftRowTab_Inv[], BYTE AES_key[] ,BYTE key[]) {
AES_ShiftRowTab[0]=0;
AES_ShiftRowTab[1]=1;
AES_ShiftRowTab[2]=2;
AES_ShiftRowTab[3]=3;
AES_ShiftRowTab[4]=5;
AES_ShiftRowTab[5]=6;
AES_ShiftRowTab[6]=7;
AES_ShiftRowTab[7]=4;
AES_ShiftRowTab[8]=10;
AES_ShiftRowTab[9]=11;
AES_ShiftRowTab[10]=8;
AES_ShiftRowTab[11]=9;
AES_ShiftRowTab[12]=15;
AES_ShiftRowTab[13]=12;
AES_ShiftRowTab[14]=13;
AES_ShiftRowTab[15]=14;
AES_Sbox[0] = 0x63;AES_Sbox[1] = 0x7c;AES_Sbox[2] = 0x77;AES_Sbox[3] = 0x7b;AES_Sbox[4] = 0xf2;AES_Sbox[5] = 0x6b;AES_Sbox[6] = 0x6f;AES_Sbox[7] = 0xc5;AES_Sbox[8] = 0x30;AES_Sbox[9] = 0x1;AES_Sbox[10] = 0x67;AES_Sbox[11] = 0x2b;AES_Sbox[12] = 0xfe;AES_Sbox[13] = 0xd7;AES_Sbox[14] = 0xab;AES_Sbox[15] = 0x76;
AES_Sbox[16] = 0xca;AES_Sbox[17] = 0x82;AES_Sbox[18] = 0xc9;AES_Sbox[19] = 0x7d;AES_Sbox[20] = 0xfa;AES_Sbox[21] = 0x59;AES_Sbox[22] = 0x47;AES_Sbox[23] = 0xf0;AES_Sbox[24] = 0xad;AES_Sbox[25] = 0xd4;AES_Sbox[26] = 0xa2;AES_Sbox[27] = 0xaf;AES_Sbox[28] = 0x9c;AES_Sbox[29] = 0xa4;AES_Sbox[30] = 0x72;AES_Sbox[31] = 0xc0;
AES_Sbox[32] = 0xb7;AES_Sbox[33] = 0xfd;AES_Sbox[34] = 0x93;AES_Sbox[35] = 0x26;AES_Sbox[36] = 0x36;AES_Sbox[37] = 0x3f;AES_Sbox[38] = 0xf7;AES_Sbox[39] = 0xcc;AES_Sbox[40] = 0x34;AES_Sbox[41] = 0xa5;AES_Sbox[42] = 0xe5;AES_Sbox[43] = 0xf1;AES_Sbox[44] = 0x71;AES_Sbox[45] = 0xd8;AES_Sbox[46] = 0x31;AES_Sbox[47] = 0x15;
AES_Sbox[48] = 0x4;AES_Sbox[49] = 0xc7;AES_Sbox[50] = 0x23;AES_Sbox[51] = 0xc3;AES_Sbox[52] = 0x18;AES_Sbox[53] = 0x96;AES_Sbox[54] = 0x5;AES_Sbox[55] = 0x9a;AES_Sbox[56] = 0x7;AES_Sbox[57] = 0x12;AES_Sbox[58] = 0x80;AES_Sbox[59] = 0xe2;AES_Sbox[60] = 0xeb;AES_Sbox[61] = 0x27;AES_Sbox[62] = 0xb2;AES_Sbox[63] = 0x75;
AES_Sbox[64] = 0x9;AES_Sbox[65] = 0x83;AES_Sbox[66] = 0x2c;AES_Sbox[67] = 0x1a;AES_Sbox[68] = 0x1b;AES_Sbox[69] = 0x6e;AES_Sbox[70] = 0x5a;AES_Sbox[71] = 0xa0;AES_Sbox[72] = 0x52;AES_Sbox[73] = 0x3b;AES_Sbox[74] = 0xd6;AES_Sbox[75] = 0xb3;AES_Sbox[76] = 0x29;AES_Sbox[77] = 0xe3;AES_Sbox[78] = 0x2f;AES_Sbox[79] = 0x84;
AES_Sbox[80] = 0x53;AES_Sbox[81] = 0xd1;AES_Sbox[82] = 0x0;AES_Sbox[83] = 0xed;AES_Sbox[84] = 0x20;AES_Sbox[85] = 0xfc;AES_Sbox[86] = 0xb1;AES_Sbox[87] = 0x5b;AES_Sbox[88] = 0x6a;AES_Sbox[89] = 0xcb;AES_Sbox[90] = 0xbe;AES_Sbox[91] = 0x39;AES_Sbox[92] = 0x4a;AES_Sbox[93] = 0x4c;AES_Sbox[94] = 0x58;AES_Sbox[95] = 0xcf;
AES_Sbox[96] = 0xd0;AES_Sbox[97] = 0xef;AES_Sbox[98] = 0xaa;AES_Sbox[99] = 0xfb;AES_Sbox[100] = 0x43;AES_Sbox[101] = 0x4d;AES_Sbox[102] = 0x33;AES_Sbox[103] = 0x85;AES_Sbox[104] = 0x45;AES_Sbox[105] = 0xf9;AES_Sbox[106] = 0x2;AES_Sbox[107] = 0x7f;AES_Sbox[108] = 0x50;AES_Sbox[109] = 0x3c;AES_Sbox[110] = 0x9f;AES_Sbox[111] = 0xa8;
AES_Sbox[112] = 0x51;AES_Sbox[113] = 0xa3;AES_Sbox[114] = 0x40;AES_Sbox[115] = 0x8f;AES_Sbox[116] = 0x92;AES_Sbox[117] = 0x9d;AES_Sbox[118] = 0x38;AES_Sbox[119] = 0xf5;AES_Sbox[120] = 0xbc;AES_Sbox[121] = 0xb6;AES_Sbox[122] = 0xda;AES_Sbox[123] = 0x21;AES_Sbox[124] = 0x10;AES_Sbox[125] = 0xff;AES_Sbox[126] = 0xf3;AES_Sbox[127] = 0xd2;
AES_Sbox[128] = 0xcd;AES_Sbox[129] = 0xc;AES_Sbox[130] = 0x13;AES_Sbox[131] = 0xec;AES_Sbox[132] = 0x5f;AES_Sbox[133] = 0x97;AES_Sbox[134] = 0x44;AES_Sbox[135] = 0x17;AES_Sbox[136] = 0xc4;AES_Sbox[137] = 0xa7;AES_Sbox[138] = 0x7e;AES_Sbox[139] = 0x3d;AES_Sbox[140] = 0x64;AES_Sbox[141] = 0x5d;AES_Sbox[142] = 0x19;AES_Sbox[143] = 0x73;
AES_Sbox[144] = 0x60;AES_Sbox[145] = 0x81;AES_Sbox[146] = 0x4f;AES_Sbox[147] = 0xdc;AES_Sbox[148] = 0x22;AES_Sbox[149] = 0x2a;AES_Sbox[150] = 0x90;AES_Sbox[151] = 0x88;AES_Sbox[152] = 0x46;AES_Sbox[153] = 0xee;AES_Sbox[154] = 0xb8;AES_Sbox[155] = 0x14;AES_Sbox[156] = 0xde;AES_Sbox[157] = 0x5e;AES_Sbox[158] = 0xb;AES_Sbox[159] = 0xdb;
AES_Sbox[160] = 0xe0;AES_Sbox[161] = 0x32;AES_Sbox[162] = 0x3a;AES_Sbox[163] = 0xa;AES_Sbox[164] = 0x49;AES_Sbox[165] = 0x6;AES_Sbox[166] = 0x24;AES_Sbox[167] = 0x5c;AES_Sbox[168] = 0xc2;AES_Sbox[169] = 0xd3;AES_Sbox[170] = 0xac;AES_Sbox[171] = 0x62;AES_Sbox[172] = 0x91;AES_Sbox[173] = 0x95;AES_Sbox[174] = 0xe4;AES_Sbox[175] = 0x79;
AES_Sbox[176] = 0xe7;AES_Sbox[177] = 0xc8;AES_Sbox[178] = 0x37;AES_Sbox[179] = 0x6d;AES_Sbox[180] = 0x8d;AES_Sbox[181] = 0xd5;AES_Sbox[182] = 0x4e;AES_Sbox[183] = 0xa9;AES_Sbox[184] = 0x6c;AES_Sbox[185] = 0x56;AES_Sbox[186] = 0xf4;AES_Sbox[187] = 0xea;AES_Sbox[188] = 0x65;AES_Sbox[189] = 0x7a;AES_Sbox[190] = 0xae;AES_Sbox[191] = 0x8;
AES_Sbox[192] = 0xba;AES_Sbox[193] = 0x78;AES_Sbox[194] = 0x25;AES_Sbox[195] = 0x2e;AES_Sbox[196] = 0x1c;AES_Sbox[197] = 0xa6;AES_Sbox[198] = 0xb4;AES_Sbox[199] = 0xc6;AES_Sbox[200] = 0xe8;AES_Sbox[201] = 0xdd;AES_Sbox[202] = 0x74;AES_Sbox[203] = 0x1f;AES_Sbox[204] = 0x4b;AES_Sbox[205] = 0xbd;AES_Sbox[206] = 0x8b;AES_Sbox[207] = 0x8a;
AES_Sbox[208] = 0x70;AES_Sbox[209] = 0x3e;AES_Sbox[210] = 0xb5;AES_Sbox[211] = 0x66;AES_Sbox[212] = 0x48;AES_Sbox[213] = 0x3;AES_Sbox[214] = 0xf6;AES_Sbox[215] = 0xe;AES_Sbox[216] = 0x61;AES_Sbox[217] = 0x35;AES_Sbox[218] = 0x57;AES_Sbox[219] = 0xb9;AES_Sbox[220] = 0x86;AES_Sbox[221] = 0xc1;AES_Sbox[222] = 0x1d;AES_Sbox[223] = 0x9e;
AES_Sbox[224] = 0xe1;AES_Sbox[225] = 0xf8;AES_Sbox[226] = 0x98;AES_Sbox[227] = 0x11;AES_Sbox[228] = 0x69;AES_Sbox[229] = 0xd9;AES_Sbox[230] = 0x8e;AES_Sbox[231] = 0x94;AES_Sbox[232] = 0x9b;AES_Sbox[233] = 0x1e;AES_Sbox[234] = 0x87;AES_Sbox[235] = 0xe9;AES_Sbox[236] = 0xce;AES_Sbox[237] = 0x55;AES_Sbox[238] = 0x28;AES_Sbox[239] = 0xdf;
AES_Sbox[240] = 0x8c;AES_Sbox[241] = 0xa1;AES_Sbox[242] = 0x89;AES_Sbox[243] = 0xd;AES_Sbox[244] = 0xbf;AES_Sbox[245] = 0xe6;AES_Sbox[246] = 0x42;AES_Sbox[247] = 0x68;AES_Sbox[248] = 0x41;AES_Sbox[249] = 0x99;AES_Sbox[250] = 0x2d;AES_Sbox[251] = 0xf;AES_Sbox[252] = 0xb0;AES_Sbox[253] = 0x54;AES_Sbox[254] = 0xbb; AES_Sbox[255] = 0x16;
int i;
for(i = 0; i < 256; i++){
AES_Sbox_Inv[AES_Sbox[i]] = i;
}
for(i = 0; i < 16; i++)
AES_ShiftRowTab_Inv[AES_ShiftRowTab[i]] = i;
for(i = 0; i < 128; i++) {
AES_xtime[i] = i << 1;
AES_xtime[128 + i] = (i << 1) ^ 0x1b;
}
for(i=0; i<176; i++)
AES_key[i]=key[i];
}
void AES_ExpandKey(BYTE key[]) {
int kl = 16, ks=176, Rcon = 1, i, j;
BYTE temp[4], temp2[4];
for(i = kl; i < ks; i += 4) {
memcpy(temp, &key[i-4], 4);
if (i % kl == 0) {
temp2[0] = AES_Sbox[temp[1]] ^ Rcon;
temp2[1] = AES_Sbox[temp[2]];
temp2[2] = AES_Sbox[temp[3]];
temp2[3] = AES_Sbox[temp[0]];
memcpy(temp, temp2, 4);
if ((Rcon <<= 1) >= 256)
Rcon ^= 0x11b;
}
else if ((kl > 24) && (i % kl == 16)) {
temp2[0] = AES_Sbox[temp[0]];
temp2[1] = AES_Sbox[temp[1]];
temp2[2] = AES_Sbox[temp[2]];
temp2[3] = AES_Sbox[temp[3]];
memcpy(temp, temp2, 4);
}
for(j = 0; j < 4; j++)
key[i + j] = key[i + j - kl] ^ temp[j];
}
}
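// AES encryption kernel (ECB mode): each thread encrypts one independent 16-byte block
// and walks the block array with a grid-stride loop. The S-box, xtime table and the
// 176-byte expanded key live in shared memory. Note that only the AES-128 schedule
// (10 rounds) is implemented, even though main() accepts 192- and 256-bit keys.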
__global__ void AES_Encrypt(aes_block aes_block_array[], BYTE key[],int block_number) {
int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ BYTE AES_ShiftRowTab[16];
__shared__ BYTE AES_Sbox[256];
__shared__ BYTE AES_ShiftRowTab_Inv[16];
__shared__ BYTE AES_Sbox_Inv[256];
__shared__ BYTE AES_xtime[256];
__shared__ BYTE AES_key[176];
int stride=blockDim.x*gridDim.x;
// Build the shared lookup tables and round keys once per block, before the
// grid-stride loop, so every thread in the block reaches __syncthreads() uniformly.
if(threadIdx.x == 0 ){
AES_Init(AES_Sbox, AES_ShiftRowTab, AES_Sbox_Inv, AES_xtime, AES_ShiftRowTab_Inv, AES_key, key);
}
__syncthreads();
for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){
BYTE block[16]; // per-thread copy of the 16-byte state being encrypted
for(int i=0; i<16; i++){
block[i] = aes_block_array[real_thread].block[i];
}
int l = 176, i;
// encryption rounds start here
AES_AddRoundKey(block, &AES_key[0]);
for(i = 16; i < l - 16; i += 16) {
AES_SubBytes(block, AES_Sbox);
AES_ShiftRows(block, AES_ShiftRowTab);
AES_MixColumns(block, AES_xtime);
AES_AddRoundKey(block, &AES_key[i]);
}
AES_SubBytes(block, AES_Sbox);
AES_ShiftRows(block, AES_ShiftRowTab);
AES_AddRoundKey(block, &AES_key[i]);
for(int i=0; i<16; i++){
aes_block_array[real_thread].block[i] = block[i];
}
}
}
int main(int argc, char* argv[]) {
ifstream ifs;
ifs.open(argv[1], ios::binary);
if(!ifs){
cerr<<"Error: cannot open the input file"<<endl;
exit(1);
}
ifs.seekg(0, ios::end);
int infileLength = ifs.tellg();
infileLength-=1;
ifs.seekg(0, ios::beg);
cout<<"Input file length (bytes): "<<infileLength<<endl<<"Number of 16-byte blocks: "<<infileLength/16<<endl;
int block_number = infileLength/16 ;
int number_of_zero_pending = infileLength%16;
aes_block* aes_block_array;
BYTE key[16 * 11]; // expanded AES key (11 round keys of 16 bytes for AES-128)
int keyLen = 0;
int blockLen = 16;
ifstream key_fp;
key_fp.open(argv[2]);
while(key_fp.peek()!=EOF)
{
key_fp>>key[keyLen];
if(key_fp.eof())
break;
keyLen++;
}
cout<<"Key length (bytes): "<<keyLen<<endl;
switch (keyLen)
{
case 16:break;
case 24:break;
case 32:break;
default:printf("Error: the key must be 128, 192 or 256 bits\n"); return 0;
}
AES_ExpandKey(key);
if(number_of_zero_pending != 0)
aes_block_array = new aes_block [ block_number + 1];
else
aes_block_array = new aes_block[ block_number ];
char temp[16];
FILE* en_fp; // encrypted output file
// FILE* de_fp; // decrypted output file
en_fp = fopen(argv[3], "wb");
// de_fp = fopen(argv[4], "wb");
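// Each 16-byte block is loaded through shiftTab, which transposes the bytes into the
// 4x4 state layout that AES_ShiftRows/AES_MixColumns index on the device. The table is
// its own inverse, so f1printBytes applies the same permutation to undo it on output.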
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
for(int i=0; i<block_number; i++){
ifs.read(temp, 16);
for(int j=0; j<16; j++){
aes_block_array[i].block[shiftTab[j]] = (unsigned char)temp[j];
}
}
if(number_of_zero_pending != 0)
{
ifs.read(temp, number_of_zero_pending);
for(int j=0; j<16; j++){
aes_block_array[block_number].block[j] = (unsigned char)temp[j];
}
for(int j=1; j<=16-number_of_zero_pending; j++)
aes_block_array[block_number].block[16-j] = '\0';
block_number++;
}
cudaSetDevice(0); // select the GPU device
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
int num_sm = prop.multiProcessorCount;
aes_block *cuda_aes_block_array;
BYTE *cuda_key;
dim3 ThreadperBlock(800);
dim3 BlockperGrid(num_sm);
cudaMalloc(&cuda_aes_block_array, block_number*sizeof(class aes_block));
cudaMalloc(&cuda_key,16*11*sizeof(BYTE) ); // key[] holds 16*11 bytes, so allocate exactly that much
cudaMemcpy(cuda_aes_block_array, aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_key, key, 16*11*sizeof(BYTE), cudaMemcpyHostToDevice); // copy only the 176 expanded-key bytes (16*15 would read past the end of key[])
printf("Number of blocks to encrypt: %d\n", block_number);
cudaEvent_t start1;
cudaEventCreate(&start1);
cudaEvent_t stop1;
cudaEventCreate(&stop1);
cudaEventRecord(start1, NULL);
AES_Encrypt <<< BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, block_number);
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f,total;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
total=msecTotal1/1000;
cout<<"Encryption time (s): "<<total<<endl;
long r=1<<23; // 2^23 blocks per second equals 1 Gbit/s (128 bits per block, 2^30 bits per Gbit)
cout<<"Throughput: "<<block_number/total/r<<" Gbps"<<endl;
cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyDeviceToHost);
for(int i=0; i<block_number; i++)
f1printBytes(aes_block_array[i].block, blockLen, en_fp);
// AES_Decrypt <<< BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, expandKeyLen, block_number);
// cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyDeviceToHost);
// for(int i=0; i<block_number-1; i++){
// f2printBytes(aes_block_array[i].block, blockLen, de_fp);
// }
// if(number_of_zero_pending == 0)
// f2printBytes(aes_block_array[block_number-1].block, blockLen, de_fp);
// else
// f3printBytes(aes_block_array[block_number-1].block, blockLen, de_fp);
// cudaFree(cuda_aes_block_array);
// cudaFree(cuda_key);
// cout<<"Decryption time (s): "<<(double)(end2-start2)/CLOCKS_PER_SEC<<endl;
// fclose(en_fp);
// fclose(de_fp);
return 0;
}
|
d164c53734fc99e4a68ef544e113f72d3804728f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <windows.h>
#include <string>
#include <iostream>
#include <thread>
#include "Dependencies\glew\glew.h"
#include "Dependencies\freeglut\freeglut.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
using namespace std;
GLfloat r = 0.0f, g = 0.0f, b = 0.0f;
const int WIDTH = 1024, HEIGHT = 768;
int cell[WIDTH][HEIGHT];
// injection circle points x, y
int cx[9];
int cy[9];
int cx1[9];
int cy1[9];
int cx2[9];
int cy2[9];
int cx3[9];
int cy3[9];
int cx4[9];
int cy4[9];
int cx5[9];
int cy5[9];
int cx6[9]; //injectParallel() moveParallel() moveWithCuda() moveKernel()
int cy6[9];
int cx11[9];
int cy11[9];
int cx21[9];
int cy21[9];
int cx31[9];
int cy31[9];
int cx41[9];
int cy41[9];
int cx51[9];
int cy51[9];
// temporary storage for the cell values covered by moving medicine particles
int s0, s1, s2, s3, s4, s5, s6, s7, s8;
int s[] = { s0, s1, s2, s3, s4, s5, s6, s7, s8 };
int s10, s11, s12, s13, s14, s15, s16, s17, s18;
int si[9] = { s10, s11, s12, s13, s14, s15, s16, s17, s18 };
int sii[9];
int m[9];
int m1[9];
int m2[9];
int m3[9];
int m4[9];
int m5[9];
int m6[9]; //injectParallel() moveParallel() moveWithCuda() moveKernel()
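// Grid and particle state: cell[x][y] holds 2 (healthy), 3 (cancer) or 4 (medicine).
// The cx*/cy* arrays track medicine-particle coordinates for the different injection
// variants, while the m*/s* arrays remember the cell value a particle is currently
// covering so it can be restored once the particle moves on.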
void init() {
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(-0.5f, WIDTH - 0.5f, -0.5f, HEIGHT - 0.5f);
}
void fun(void)
{
cout << "Exiting because a particle moved outside the screen or memory overflowed";
}
// restore the cell value saved in sii[], advance each medicine particle one step, then save the value at its new position
void moveii() {
for (int i = 1; i < 9; i++) {
if (cx[i] > 1020 || cy[i] > 760 || cx[i] < 0 || cy[i] < 0) {
atexit(fun);
_Exit(10);
}
if (cell[cx[i]][cy[i]] == 4) {
cell[cx[i]][cy[i]] = sii[i];
switch (i) {
case 1: {
//cx[1] = x;
cy[i] = cy[i] - 1;
}
break;
case 2: {
cx[i] = cx[i] + 1;
cy[i] = cy[i] - 1;
}
break;
case 3: {
cx[i] = cx[i] + 1;
//cy[3] = y;
}
break;
case 4: {
cx[i] = cx[i] + 1;
cy[i] = cy[i] + 1;
}
break;
case 5: {
//cx[5] = x;
cy[i] = cy[i] + 1;
}
break;
case 6: {
cx[i] = cx[i] - 1;
cy[i] = cy[i] + 1;
}
break;
case 7: {
cx[i] = cx[i] - 1;
//cy[7] = y;
}
break;
case 8: {
cx[i] = cx[i] - 1;
cy[i] = cy[i] - 1;
}
break;
}//end switch
if (cx[i] > 1020 || cy[i] > 760 || cx[i] < 0 || cy[i] < 0) {
atexit(fun);
_Exit(10);
}
sii[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}// endif
}
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_1(int* cx1, int* cy1) {
int i = threadIdx.x;
//int j = threadIdx.y;
//cx[i] = cx[i];
cy1[i] = cy1[i] - 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_2(int* cx2, int* cy2) {
int i = threadIdx.x;
//int j = threadIdx.y;
cx2[i] = cx2[i] + 1;
cy2[i] = cy2[i] - 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_3(int* cx3, int* cy3) {
int i = threadIdx.x;
//int j = threadIdx.y;
cx3[i] = cx3[i] + 1;
//cy[j] = cy[j];
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_4(int* cx4, int* cy4) {
int i = threadIdx.x;
//int j = threadIdx.y;
cx4[i] = cx4[i] + 1;
cy4[i] = cy4[i] + 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_5(int* cx5, int* cy5) {
int i = threadIdx.x;
//int j = threadIdx.y;
//cx[i] = cx[i];
cy5[i] = cy5[i] + 1;
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i]
__global__ void moveKernel(int* cx, int* cy) {
/*extern __shared__ int bothBuffers[];
int* ss0 = &bothBuffers[0];
int* ss1 = &bothBuffers[1];
int* ss2 = &bothBuffers[2];
int* ss3 = &bothBuffers[3];
int* ss4 = &bothBuffers[4];
int* ss5 = &bothBuffers[5];*/
int i = threadIdx.x;
cx[i] = cx[i] + 1;
cy[i] = cy[i] + 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
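// Host-side wrapper pattern shared by every moveWithCuda* function below: allocate two
// device buffers, copy cx/cy to the GPU, launch the matching moveKernel_*, synchronize,
// copy the updated coordinates back, and free the buffers. Each call pays the full
// allocation and transfer cost for just `size` integers per array.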
hipError_t moveWithCuda5(int* cx, int* cy, unsigned int size, int i) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
switch (i) {
case 1:
moveKernel_1 << <1, size >> > (dev_cx, dev_cy);
break;
case 2:
moveKernel_2 << <1, size >> > (dev_cx, dev_cy);
break;
case 3:
moveKernel_3 << <1, size >> > (dev_cx, dev_cy);
break;
case 4:
moveKernel_4 << <1, size >> > (dev_cx, dev_cy);
break;
case 5:
moveKernel_5 << <1, size >> > (dev_cx, dev_cy);
break;
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i]
hipError_t moveWithCuda(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
hipError_t moveWithCuda_1(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_1 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
hipError_t moveWithCuda_2(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_2 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
hipError_t moveWithCuda_3(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_3 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
hipError_t moveWithCuda_4(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_4 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
hipError_t moveWithCuda_5(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two coordinate arrays (used as both input and output).
cudaStatus = hipMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_cx, cx, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_cy, cy, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_5 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching the move kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(cx, dev_cx, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(cy, dev_cy, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_cx);
hipFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_1() {
int size = 5;
cell[cx1[0]][cy1[0]] = m1[0];
hipError_t cudaStatus = moveWithCuda_1(cx1, cy1, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx1[0] > 1020 || cy1[0] > 760 || cx1[0] < 0 || cy1[0] < 0) {
atexit(fun);
_Exit(10);
}
m1[0] = cell[cx1[0]][cy1[0]];
cell[cx1[0]][cy1[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_2() {
int size = 5;
cell[cx2[0]][cy2[0]] = m2[0];
hipError_t cudaStatus = moveWithCuda_2(cx2, cy2, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx2[0] > 1020 || cy2[0] > 760 || cx2[0] < 0 || cy2[0] < 0) {
atexit(fun);
_Exit(10);
}
m2[0] = cell[cx2[0]][cy2[0]];
cell[cx2[0]][cy2[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_3() {
int size = 5;
cell[cx3[0]][cy3[0]] = m3[0];
hipError_t cudaStatus = moveWithCuda_3(cx3, cy3, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx3[0] > 1020 || cy3[0] > 760 || cx3[0] < 0 || cy3[0] < 0) {
atexit(fun);
_Exit(10);
}
m3[0] = cell[cx3[0]][cy3[0]];
cell[cx3[0]][cy3[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_4() {
int size = 5;
cell[cx4[0]][cy4[0]] = m4[0];
hipError_t cudaStatus = moveWithCuda_4(cx4, cy4, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx4[0] > 1020 || cy4[0] > 760 || cx4[0] < 0 || cy4[0] < 0) {
atexit(fun);
_Exit(10);
}
m4[0] = cell[cx4[0]][cy4[0]];
cell[cx4[0]][cy4[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_5() {
int size = 5;
cell[cx5[0]][cy5[0]] = m5[0];
hipError_t cudaStatus = moveWithCuda_5(cx5, cy5, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx5[0] > 1020 || cy5[0] > 760 || cx5[0] < 0 || cy5[0] < 0) {
atexit(fun);
_Exit(10);
}
m5[0] = cell[cx5[0]][cy5[0]];
cell[cx5[0]][cy5[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i] i=0..4 size=5
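// Note: moveWithCuda() advances all five particles on every pass of the loop below,
// so a single call to moveParallel() moves each particle five diagonal steps.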
void moveParallel() {
int size = 5;
hipError_t cudaStatus;
for (int i = 0; i < 5; i++) {
if (cx6[i] > 1020 || cy6[i] > 760 || cx6[i] < 0 || cy6[i] < 0) {
atexit(fun);
_Exit(10);
}
cell[cx6[i]][cy6[i]] = m6[i];
cudaStatus = moveWithCuda(cx6, cy6, size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx6[i] > 1020 || cy6[i] > 760 || cx6[i] < 0 || cy6[i] < 0) {
atexit(fun);
_Exit(10);
}
m6[i] = cell[cx6[i]][cy6[i]];
cell[cx6[i]][cy6[i]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
}
void move5() {
int size = 5;
for (int i = 1; i < 6; i++) {
switch (i) {
case 1: {
cell[cx1[0]][cy1[0]] = m1[0];
hipError_t cudaStatus = moveWithCuda5(cx1, cy1, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx1[0] > 1020 || cy1[0] > 760 || cx1[0] < 0 || cy1[0] < 0) {
atexit(fun);
_Exit(10);
}
m1[0] = cell[cx1[0]][cy1[0]];
cell[cx1[0]][cy1[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 2: {
cell[cx2[0]][cy2[0]] = m2[0];
hipError_t cudaStatus = moveWithCuda5(cx2, cy2, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx2[0] > 1020 || cy2[0] > 760 || cx2[0] < 0 || cy2[0] < 0) {
atexit(fun);
_Exit(10);
}
m2[0] = cell[cx2[0]][cy2[0]];
cell[cx2[0]][cy2[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 3: {
cell[cx3[0]][cy3[0]] = m3[0];
hipError_t cudaStatus = moveWithCuda5(cx3, cy3, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx3[0] > 1020 || cy3[0] > 760 || cx3[0] < 0 || cy3[0] < 0) {
atexit(fun);
_Exit(10);
}
m3[0] = cell[cx3[0]][cy3[0]];
cell[cx3[0]][cy3[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 4: {
cell[cx4[0]][cy4[0]] = m4[0];
hipError_t cudaStatus = moveWithCuda5(cx4, cy4, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx4[0] > 1020 || cy4[0] > 760 || cx4[0] < 0 || cy4[0] < 0) {
atexit(fun);
_Exit(10);
}
m4[0] = cell[cx4[0]][cy4[0]];
cell[cx4[0]][cy4[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 5: {
cell[cx5[0]][cy5[0]] = m5[0];
hipError_t cudaStatus = moveWithCuda5(cx5, cy5, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx5[0] > 1020 || cy5[0] > 760 || cx5[0] < 0 || cy5[0] < 0) {
atexit(fun);
_Exit(10);
}
m5[0] = cell[cx5[0]][cy5[0]];
cell[cx5[0]][cy5[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
}//end switch
}
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0])
void move_m5() {
int size = 5;
for (int i = 1; i < 6; i++) {
switch (i) {
case 1: {
cell[cx11[0]][cy11[0]] = m[1];
hipError_t cudaStatus = moveWithCuda5(cx11, cy11, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx11[0] > 1020 || cy11[0] > 760 || cx11[0] < 0 || cy11[0] < 0) {
atexit(fun);
_Exit(10);
}
m[1] = cell[cx11[0]][cy11[0]];
cell[cx11[0]][cy11[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 2: {
cell[cx21[0]][cy21[0]] = m[2];
hipError_t cudaStatus = moveWithCuda5(cx21, cy21, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx21[0] > 1020 || cy21[0] > 760 || cx21[0] < 0 || cy21[0] < 0) {
atexit(fun);
_Exit(10);
}
m[2] = cell[cx21[0]][cy21[0]];
cell[cx21[0]][cy21[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 3: {
cell[cx31[0]][cy31[0]] = m[3];
hipError_t cudaStatus = moveWithCuda5(cx31, cy31, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx31[0] > 1020 || cy31[0] > 760 || cx31[0] < 0 || cy31[0] < 0) { // bounds-check the coordinates that were just moved (cx31/cy31, not cx3/cy3)
atexit(fun);
_Exit(10);
}
m[3] = cell[cx31[0]][cy31[0]];
cell[cx31[0]][cy31[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 4: {
cell[cx41[0]][cy41[0]] = m[4];
hipError_t cudaStatus = moveWithCuda5(cx41, cy41, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx41[0] > 1020 || cy41[0] > 760 || cx41[0] < 0 || cy41[0] < 0) {
atexit(fun);
_Exit(10);
}
m[4] = cell[cx41[0]][cy41[0]];
cell[cx41[0]][cy41[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
case 5: {
cell[cx51[0]][cy51[0]] = m[5];
hipError_t cudaStatus = moveWithCuda5(cx51, cy51, size, i);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx51[0] > 1020 || cy51[0] > 760 || cx51[0] < 0 || cy51[0] < 0) {
atexit(fun);
_Exit(10);
}
m[5] = cell[cx51[0]][cy51[0]];
cell[cx51[0]][cy51[0]] = 4;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
_Exit(11);
}
}
break;
}//end switch
}
}
// store temporary data (kept from Assignment 2); moveii() later restores the saved cell values from sii[]
void injectionii(int x, int y, int m, int num) {
// multipoints injection
/*x = (2 * m - 1) * x / 2;
y = y / 2;*/
// single injection
x = x;
y = y;
cx[0] = x;
cy[0] = y;
cx[1] = x;
cy[1] = y - 1;
cx[2] = x + 1;
cy[2] = y - 1;
cx[3] = x + 1;
cy[3] = y;
cx[4] = x + 1;
cy[4] = y + 1;
cx[5] = x;
cy[5] = y + 1;
cx[6] = x - 1;
cy[6] = y + 1;
cx[7] = x - 1;
cy[7] = y;
cx[8] = x - 1;
cy[8] = y - 1;
if (cell[cx[0]][cy[0]] == 3) {
if (num > 5) {
for (int i = 0; i < 9; i++) {
if (cell[cx[i]][cy[i]] == 3) {
cell[cx[i]][cy[i]] = 2;
}
}
}
else {
for (int i = 1; i <= num; i++) {
m1[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
}
else {
//else if (cell[cx[0]][cy[0]] == 2) {
for (int i = 1; i <= num; i++) {
m1[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
//move();
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i] i=0..4 size=5
void injectParallel(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4, int x5, int y5) {
int i = 0;
cx6[i] = x1;
cy6[i] = y1;
m6[i] = cell[cx6[i]][cy6[i]];
i = 1;
cx6[i] = x2;
cy6[i] = y2;
m6[i] = cell[cx6[i]][cy6[i]];
i = 2;
cx6[i] = x3;
cy6[i] = y3;
m6[i] = cell[cx6[i]][cy6[i]];
i = 3;
cx6[i] = x4;
cy6[i] = y4;
m6[i] = cell[cx6[i]][cy6[i]];
i = 4;
cx6[i] = x5;
cy6[i] = y5;
m6[i] = cell[cx6[i]][cy6[i]];
}
// injectionCuda() move_m5() moveWithCuda5(cx??, cy??, size, i) moveKernel_?(cx??,cy??) cx21,cy21..cx51[0]cy51[0]
void injectionCuda(int x, int y, int num) {
cx[0] = x;
cy[0] = y;
cx[1] = x;
cy[1] = y - 1;
cx[2] = x + 1;
cy[2] = y - 1;
cx[3] = x + 1;
cy[3] = y;
cx[4] = x + 1;
cy[4] = y + 1;
cx[5] = x;
cy[5] = y + 1;
cx[6] = x - 1;
cy[6] = y + 1;
cx[7] = x - 1;
cy[7] = y;
cx[8] = x - 1;
cy[8] = y - 1;
cx11[0] = x;
cy11[0] = y-1;
cx21[0] = x+1;
cy21[0] = y-1;
cx31[0] = x+1;
cy31[0] = y;
cx41[0] = x+1;
cy41[0] = y+1;
cx51[0] = x;
cy51[0] = y+1;
if (cell[cx[0]][cy[0]] == 3) {
if (num > 5) {
for (int i = 0; i < 9; i++) {
if (cell[cx[i]][cy[i]] == 3) {
cell[cx[i]][cy[i]] = 2;
}
}
}
else {
for (int i = 1; i <= num; i++) {
m[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
}
else {
for (int i = 1; i <= num; i++) {
m[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
}
void injection5(int x, int y, int num) {
cx1[0] = x;
cy1[0] = y;
m1[0] = cell[cx1[0]][cy1[0]];
cx2[0] = x;
cy2[0] = y;
m2[0] = cell[cx2[0]][cy2[0]];
cx3[0] = x;
cy3[0] = y;
m3[0] = cell[cx3[0]][cy3[0]];
cx4[0] = x;
cy4[0] = y;
m4[0] = cell[cx4[0]][cy4[0]];
cx5[0] = x;
cy5[0] = y;
m5[0] = cell[cx5[0]][cy5[0]];
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_1(int x, int y, int num) {
cx1[0] = x;
cy1[0] = y;
m1[0] = cell[cx1[0]][cy1[0]];
cell[cx1[0]][cy1[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_2(int x, int y, int num) {
cx2[0] = x;
cy2[0] = y;
m2[0] = cell[cx2[0]][cy2[0]];
cell[cx2[0]][cy2[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_3(int x, int y, int num) {
cx3[0] = x;
cy3[0] = y;
m3[0] = cell[cx3[0]][cy3[0]];
cell[cx3[0]][cy3[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_4(int x, int y, int num) {
cx4[0] = x;
cy4[0] = y;
m4[0] = cell[cx4[0]][cy4[0]];
cell[cx4[0]][cy4[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_5(int x, int y, int num) {
cx5[0] = x;
cy5[0] = y;
m5[0] = cell[cx5[0]][cy5[0]];
cell[cx5[0]][cy5[0]] = 4;
}
void setup(int x, int y, int m) {
int w = (m * x) + 2;
for (int i = (w - x); i < w; i++) {
for (int j = 2; j < y + 2; j++) {
cell[i][j] = (rand() % 2 + 2); // 2,3
}
}
}
void changeColor(GLfloat red, GLfloat green, GLfloat blue) {
r = red;
g = green;
b = blue;
}
//Check the status of an individual cell and apply the rules: 3 is cancer, 2 is a healthy cell, 4 is medicine
static int checkStatus(int status, int x, int y) {
int cancerNeighbours = 0;
int medicineNeighbours = 0;
for (int i = (x - 1); i < (x + 2); i++) {
if (cell[i][y - 1] == 3) {
cancerNeighbours++;
}
if (cell[i][y + 1] == 3) {
cancerNeighbours++;
}
}
if (cell[x - 1][y] == 3) {
cancerNeighbours++;
}
if (cell[x + 1][y] == 3) {
cancerNeighbours++;
}
for (int i = (x - 1); i < (x + 2); i++) {
if (cell[i][y - 1] == 4) {
medicineNeighbours++;
}
if (cell[i][y + 1] == 4) {
medicineNeighbours++;
}
}
if (cell[x - 1][y] == 4) {
medicineNeighbours++;
}
if (cell[x + 1][y] == 4) {
medicineNeighbours++;
}
if (status == 3 && medicineNeighbours >= 6) {
status = 2;
}
else if (status == 2 && cancerNeighbours >= 6) {
status = 3;
}
return status;
}
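// Added illustrative sketch (not part of the original program): a minimal
// host-side check of the rule described above. Surrounding a cancer cell (3)
// with eight medicine cells (4) satisfies the ">= 6 medicine neighbours"
// condition, so checkStatus should report the cell as healthy (2). The name
// selfTestCheckStatus is hypothetical and nothing in this file calls it.
static void selfTestCheckStatus() {
	int x = 10, y = 10;
	cell[x][y] = 3;                 // the cancer cell under test
	for (int i = x - 1; i <= x + 1; i++) {
		for (int j = y - 1; j <= y + 1; j++) {
			if (i != x || j != y) {
				cell[i][j] = 4;     // eight medicine neighbours
			}
		}
	}
	if (checkStatus(cell[x][y], x, y) != 2) {
		fprintf(stderr, "checkStatus rule check failed\n");
	}
}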
//Display individual pixels.
static void display()
{
glClear(GL_COLOR_BUFFER_BIT);
GLfloat red, green, blue;
for (int i = 5; i < (WIDTH - 5); i++) {
for (int j = 5; j < (HEIGHT - 5); j++) {
//Check the updated status of the current cell.
int cellV = checkStatus(cell[i][j], i, j);
if (cellV == 0) {
red = r;
green = 0.0f;
blue = 1.0;
cell[i][j] = 0;
}
else if (cellV == 2) {
red = r;
green = 0.4f;
blue = b;
cell[i][j] = 2;
}
else if (cellV == 3) {
red = 0.4f;
green = g;
blue = b;
cell[i][j] = 3;
}
else if (cellV == 4) {
red = 1.0f;
green = 1.0f;
blue = 0.0f;
cell[i][j] = 4;
}
glPointSize(1.0f);
glColor3f(red, green, blue);
glBegin(GL_POINTS);
glVertex2i(i, j);
glEnd();
}
}
glutSwapBuffers();
}
void update(int value) {
try {
//==test 1 ===
moveParallel();
//==test 2 ===
//move_1(); //injection_?()
//move_2(); //injection_?()
//move_3(); //injection_?()
//move_4(); //injection_?()
//move_5(); //injection_?()
//move_m5(); //injectionCuda()
//move5(); //injection5()
}
catch (...) {}
glutPostRedisplay();
glutTimerFunc(1000 / 30, update, 0);
}
int main(int argc, char** argv)
{
int x = 1020;
int y = 766;
int m = 1;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(WIDTH, HEIGHT);
glutCreateWindow("Cell Growth Simulator");
init();
setup(x, y, m);
//===== test 1 =============
injectParallel(400, 100, 400, 200, 400, 300, 400, 400, 400, 500);
//======= test 3 ===========
/*injection5(400, 400, 5);
injectionCuda(500, 500, 5);*/
//======= test 2 ===========
/*injectionCuda(500, 500, 5);
injection_1(200, 300, 1);
injection_2(300, 300, 1);
injection_3(400, 300, 1);
injection_4(500, 300, 1);
injection_5(600, 300, 1);*/
glutDisplayFunc(display);
glutTimerFunc(1000 / 30, update, 0);
changeColor(r, g, b);
glutMainLoop();
return 0;
}
|
d164c53734fc99e4a68ef544e113f72d3804728f.cu
|
#include <windows.h>
#include <string>
#include <iostream>
#include <thread>
#include "Dependencies\glew\glew.h"
#include "Dependencies\freeglut\freeglut.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
using namespace std;
GLfloat r = 0.0f, g = 0.0f, b = 0.0f;
const int WIDTH = 1024, HEIGHT = 768;
int cell[WIDTH][HEIGHT];
// injection circle points x, y
int cx[9];
int cy[9];
int cx1[9];
int cy1[9];
int cx2[9];
int cy2[9];
int cx3[9];
int cy3[9];
int cx4[9];
int cy4[9];
int cx5[9];
int cy5[9];
int cx6[9]; //injectParallel() moveParallel() moveWithCuda() moveKernel()
int cy6[9];
int cx11[9];
int cy11[9];
int cx21[9];
int cy21[9];
int cx31[9];
int cy31[9];
int cx41[9];
int cy41[9];
int cx51[9];
int cy51[9];
// storage temporary data
int s0, s1, s2, s3, s4, s5, s6, s7, s8;
int s[] = { s0, s1, s2, s3, s4, s5, s6, s7, s8 };
int s10, s11, s12, s13, s14, s15, s16, s17, s18;
int si[9] = { s10, s11, s12, s13, s14, s15, s16, s17, s18 };
int sii[9];
int m[9];
int m1[9];
int m2[9];
int m3[9];
int m4[9];
int m5[9];
int m6[9]; //injectParallel() moveParallel() moveWithCuda() moveKernel()
void init() {
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(-0.5f, WIDTH - 0.5f, -0.5f, HEIGHT - 0.5f);
}
void fun(void)
{
cout << "Exiting because the position is outside the screen or a memory overflow occurred";
}
// receive temporary data from stored in sii[] after injectionii()
void moveii() {
for (int i = 1; i < 9; i++) {
if (cx[i] > 1020 || cy[i] > 760 || cx[i] < 0 || cy[i] < 0) {
atexit(fun);
_Exit(10);
}
if (cell[cx[i]][cy[i]] == 4) {
cell[cx[i]][cy[i]] = sii[i];
switch (i) {
case 1: {
//cx[1] = x;
cy[i] = cy[i] - 1;
}
break;
case 2: {
cx[i] = cx[i] + 1;
cy[i] = cy[i] - 1;
}
break;
case 3: {
cx[i] = cx[i] + 1;
//cy[3] = y;
}
break;
case 4: {
cx[i] = cx[i] + 1;
cy[i] = cy[i] + 1;
}
break;
case 5: {
//cx[5] = x;
cy[i] = cy[i] + 1;
}
break;
case 6: {
cx[i] = cx[i] - 1;
cy[i] = cy[i] + 1;
}
break;
case 7: {
cx[i] = cx[i] - 1;
//cy[7] = y;
}
break;
case 8: {
cx[i] = cx[i] - 1;
cy[i] = cy[i] - 1;
}
break;
}//end switch
if (cx[i] > 1020 || cy[i] > 760 || cx[i] < 0 || cy[i] < 0) {
atexit(fun);
_Exit(10);
}
sii[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}// endif
}
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_1(int* cx1, int* cy1) {
int i = threadIdx.x;
//int j = threadIdx.y;
//cx[i] = cx[i];
cy1[i] = cy1[i] - 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_2(int* cx2, int* cy2) {
int i = threadIdx.x;
//int j = threadIdx.y;
cx2[i] = cx2[i] + 1;
cy2[i] = cy2[i] - 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_3(int* cx3, int* cy3) {
int i = threadIdx.x;
//int j = threadIdx.y;
cx3[i] = cx3[i] + 1;
//cy[j] = cy[j];
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_4(int* cx4, int* cy4) {
int i = threadIdx.x;
//int j = threadIdx.y;
cx4[i] = cx4[i] + 1;
cy4[i] = cy4[i] + 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
__global__ void moveKernel_5(int* cx5, int* cy5) {
int i = threadIdx.x;
//int j = threadIdx.y;
//cx[i] = cx[i];
cy5[i] = cy5[i] + 1;
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i]
__global__ void moveKernel(int* cx, int* cy) {
/*extern __shared__ int bothBuffers[];
int* ss0 = &bothBuffers[0];
int* ss1 = &bothBuffers[1];
int* ss2 = &bothBuffers[2];
int* ss3 = &bothBuffers[3];
int* ss4 = &bothBuffers[4];
int* ss5 = &bothBuffers[5];*/
int i = threadIdx.x;
cx[i] = cx[i] + 1;
cy[i] = cy[i] + 1;
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0]) cx21,cy21..cx51[0]cy51[0]
cudaError_t moveWithCuda5(int* cx, int* cy, unsigned int size, int i) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
switch (i) {
case 1:
moveKernel_1 << <1, size >> > (dev_cx, dev_cy);
break;
case 2:
moveKernel_2 << <1, size >> > (dev_cx, dev_cy);
break;
case 3:
moveKernel_3 << <1, size >> > (dev_cx, dev_cy);
break;
case 4:
moveKernel_4 << <1, size >> > (dev_cx, dev_cy);
break;
case 5:
moveKernel_5 << <1, size >> > (dev_cx, dev_cy);
break;
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i]
cudaError_t moveWithCuda(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
cudaError_t moveWithCuda_1(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_1 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
cudaError_t moveWithCuda_2(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_2 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
cudaError_t moveWithCuda_3(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_3 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
cudaError_t moveWithCuda_4(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_4 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
cudaError_t moveWithCuda_5(int* cx, int* cy, unsigned int size) {
int* dev_cx = 0;
int* dev_cy = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_cx, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_cy, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_cx, cx, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_cy, cy, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
moveKernel_5 << <1, size >> > (dev_cx, dev_cy);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(cx, dev_cx, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(cy, dev_cy, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_cx);
cudaFree(dev_cy);
return cudaStatus;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_1() {
int size = 5;
cell[cx1[0]][cy1[0]] = m1[0];
cudaError_t cudaStatus = moveWithCuda_1(cx1, cy1, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx1[0] > 1020 || cy1[0] > 760 || cx1[0] < 0 || cy1[0] < 0) {
atexit(fun);
_Exit(10);
}
m1[0] = cell[cx1[0]][cy1[0]];
cell[cx1[0]][cy1[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_2() {
int size = 5;
cell[cx2[0]][cy2[0]] = m2[0];
cudaError_t cudaStatus = moveWithCuda_2(cx2, cy2, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx2[0] > 1020 || cy2[0] > 760 || cx2[0] < 0 || cy2[0] < 0) {
atexit(fun);
_Exit(10);
}
m2[0] = cell[cx2[0]][cy2[0]];
cell[cx2[0]][cy2[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_3() {
int size = 5;
cell[cx3[0]][cy3[0]] = m3[0];
cudaError_t cudaStatus = moveWithCuda_3(cx3, cy3, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx3[0] > 1020 || cy3[0] > 760 || cx3[0] < 0 || cy3[0] < 0) {
atexit(fun);
_Exit(10);
}
m3[0] = cell[cx3[0]][cy3[0]];
cell[cx3[0]][cy3[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_4() {
int size = 5;
cell[cx4[0]][cy4[0]] = m4[0];
cudaError_t cudaStatus = moveWithCuda_4(cx4, cy4, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx4[0] > 1020 || cy4[0] > 760 || cx4[0] < 0 || cy4[0] < 0) {
atexit(fun);
_Exit(10);
}
m4[0] = cell[cx4[0]][cy4[0]];
cell[cx4[0]][cy4[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void move_5() {
int size = 5;
cell[cx5[0]][cy5[0]] = m5[0];
cudaError_t cudaStatus = moveWithCuda_5(cx5, cy5, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx5[0] > 1020 || cy5[0] > 760 || cx5[0] < 0 || cy5[0] < 0) {
atexit(fun);
_Exit(10);
}
m5[0] = cell[cx5[0]][cy5[0]];
cell[cx5[0]][cy5[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i] i=0..4 size=5
void moveParallel() {
int size = 5;
cudaError_t cudaStatus;
for (int i = 0; i < 5; i++) {
if (cx6[i] > 1020 || cy6[i] > 760 || cx6[i] < 0 || cy6[i] < 0) {
atexit(fun);
_Exit(10);
}
cell[cx6[i]][cy6[i]] = m6[i];
cudaStatus = moveWithCuda(cx6, cy6, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx6[i] > 1020 || cy6[i] > 760 || cx6[i] < 0 || cy6[i] < 0) {
atexit(fun);
_Exit(10);
}
m6[i] = cell[cx6[i]][cy6[i]];
cell[cx6[i]][cy6[i]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
}
void move5() {
int size = 5;
for (int i = 1; i < 6; i++) {
switch (i) {
case 1: {
cell[cx1[0]][cy1[0]] = m1[0];
cudaError_t cudaStatus = moveWithCuda5(cx1, cy1, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx1[0] > 1020 || cy1[0] > 760 || cx1[0] < 0 || cy1[0] < 0) {
atexit(fun);
_Exit(10);
}
m1[0] = cell[cx1[0]][cy1[0]];
cell[cx1[0]][cy1[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 2: {
cell[cx2[0]][cy2[0]] = m2[0];
cudaError_t cudaStatus = moveWithCuda5(cx2, cy2, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx2[0] > 1020 || cy2[0] > 760 || cx2[0] < 0 || cy2[0] < 0) {
atexit(fun);
_Exit(10);
}
m2[0] = cell[cx2[0]][cy2[0]];
cell[cx2[0]][cy2[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 3: {
cell[cx3[0]][cy3[0]] = m3[0];
cudaError_t cudaStatus = moveWithCuda5(cx3, cy3, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx3[0] > 1020 || cy3[0] > 760 || cx3[0] < 0 || cy3[0] < 0) {
atexit(fun);
_Exit(10);
}
m3[0] = cell[cx3[0]][cy3[0]];
cell[cx3[0]][cy3[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 4: {
cell[cx4[0]][cy4[0]] = m4[0];
cudaError_t cudaStatus = moveWithCuda5(cx4, cy4, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx4[0] > 1020 || cy4[0] > 760 || cx4[0] < 0 || cy4[0] < 0) {
atexit(fun);
_Exit(10);
}
m4[0] = cell[cx4[0]][cy4[0]];
cell[cx4[0]][cy4[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 5: {
cell[cx5[0]][cy5[0]] = m5[0];
cudaError_t cudaStatus = moveWithCuda5(cx5, cy5, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx5[0] > 1020 || cy5[0] > 760 || cx5[0] < 0 || cy5[0] < 0) {
atexit(fun);
_Exit(10);
}
m5[0] = cell[cx5[0]][cy5[0]];
cell[cx5[0]][cy5[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
}//end switch
}
}
// injectionCuda() move_m5() moveWithCuda5(cx11, cy11, size, i) moveKernel_1...moveKernel_5(cx5[0],cy5[0])
void move_m5() {
int size = 5;
for (int i = 1; i < 6; i++) {
switch (i) {
case 1: {
cell[cx11[0]][cy11[0]] = m[1];
cudaError_t cudaStatus = moveWithCuda5(cx11, cy11, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx11[0] > 1020 || cy11[0] > 760 || cx11[0] < 0 || cy11[0] < 0) {
atexit(fun);
_Exit(10);
}
m[1] = cell[cx11[0]][cy11[0]];
cell[cx11[0]][cy11[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 2: {
cell[cx21[0]][cy21[0]] = m[2];
cudaError_t cudaStatus = moveWithCuda5(cx21, cy21, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx21[0] > 1020 || cy21[0] > 760 || cx21[0] < 0 || cy21[0] < 0) {
atexit(fun);
_Exit(10);
}
m[2] = cell[cx21[0]][cy21[0]];
cell[cx21[0]][cy21[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 3: {
cell[cx31[0]][cy31[0]] = m[3];
cudaError_t cudaStatus = moveWithCuda5(cx31, cy31, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx31[0] > 1020 || cy31[0] > 760 || cx31[0] < 0 || cy31[0] < 0) {
atexit(fun);
_Exit(10);
}
m[3] = cell[cx31[0]][cy31[0]];
cell[cx31[0]][cy31[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 4: {
cell[cx41[0]][cy41[0]] = m[4];
cudaError_t cudaStatus = moveWithCuda5(cx41, cy41, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx41[0] > 1020 || cy41[0] > 760 || cx41[0] < 0 || cy41[0] < 0) {
atexit(fun);
_Exit(10);
}
m[4] = cell[cx41[0]][cy41[0]];
cell[cx41[0]][cy41[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
case 5: {
cell[cx51[0]][cy51[0]] = m[5];
cudaError_t cudaStatus = moveWithCuda5(cx51, cy51, size, i);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "moveWithCuda failed!");
_Exit(9);
}
if (cx51[0] > 1020 || cy51[0] > 760 || cx51[0] < 0 || cy51[0] < 0) {
atexit(fun);
_Exit(10);
}
m[5] = cell[cx51[0]][cy51[0]];
cell[cx51[0]][cy51[0]] = 4;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
_Exit(11);
}
}
break;
}//end switch
}
}
// store temporary data in sii[]; kept from Assignment 2
void injectionii(int x, int y, int m, int num) {
// multipoints injection
/*x = (2 * m - 1) * x / 2;
y = y / 2;*/
// single injection: x and y are used unchanged
cx[0] = x;
cy[0] = y;
cx[1] = x;
cy[1] = y - 1;
cx[2] = x + 1;
cy[2] = y - 1;
cx[3] = x + 1;
cy[3] = y;
cx[4] = x + 1;
cy[4] = y + 1;
cx[5] = x;
cy[5] = y + 1;
cx[6] = x - 1;
cy[6] = y + 1;
cx[7] = x - 1;
cy[7] = y;
cx[8] = x - 1;
cy[8] = y - 1;
if (cell[cx[0]][cy[0]] == 3) {
if (num > 5) {
for (int i = 0; i < 9; i++) {
if (cell[cx[i]][cy[i]] == 3) {
cell[cx[i]][cy[i]] = 2;
}
}
}
else {
for (int i = 1; i <= num; i++) {
m1[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
}
else {
//else if (cell[cx[0]][cy[0]] == 2) {
for (int i = 1; i <= num; i++) {
m1[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
//move();
}
// moveWithCuda(cx,cy,size) moveKernel(cx,cy) injectParallel() moveParallel() cx6[i]cy6[i] m6[i] i=0..4 size=5
void injectParallel(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4, int x5, int y5) {
int i = 0;
cx6[i] = x1;
cy6[i] = y1;
m6[i] = cell[cx6[i]][cy6[i]];
i = 1;
cx6[i] = x2;
cy6[i] = y2;
m6[i] = cell[cx6[i]][cy6[i]];
i = 2;
cx6[i] = x3;
cy6[i] = y3;
m6[i] = cell[cx6[i]][cy6[i]];
i = 3;
cx6[i] = x4;
cy6[i] = y4;
m6[i] = cell[cx6[i]][cy6[i]];
i = 4;
cx6[i] = x5;
cy6[i] = y5;
m6[i] = cell[cx6[i]][cy6[i]];
}
// injectionCuda() move_m5() moveWithCuda5(cx??, cy??, size, i) moveKernel_?(cx??,cy??) cx21,cy21..cx51[0]cy51[0]
void injectionCuda(int x, int y, int num) {
cx[0] = x;
cy[0] = y;
cx[1] = x;
cy[1] = y - 1;
cx[2] = x + 1;
cy[2] = y - 1;
cx[3] = x + 1;
cy[3] = y;
cx[4] = x + 1;
cy[4] = y + 1;
cx[5] = x;
cy[5] = y + 1;
cx[6] = x - 1;
cy[6] = y + 1;
cx[7] = x - 1;
cy[7] = y;
cx[8] = x - 1;
cy[8] = y - 1;
cx11[0] = x;
cy11[0] = y-1;
cx21[0] = x+1;
cy21[0] = y-1;
cx31[0] = x+1;
cy31[0] = y;
cx41[0] = x+1;
cy41[0] = y+1;
cx51[0] = x;
cy51[0] = y+1;
if (cell[cx[0]][cy[0]] == 3) {
if (num > 5) {
for (int i = 0; i < 9; i++) {
if (cell[cx[i]][cy[i]] == 3) {
cell[cx[i]][cy[i]] = 2;
}
}
}
else {
for (int i = 1; i <= num; i++) {
m[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
}
else {
for (int i = 1; i <= num; i++) {
m[i] = cell[cx[i]][cy[i]];
cell[cx[i]][cy[i]] = 4;
}
}
}
void injection5(int x, int y, int num) {
cx1[0] = x;
cy1[0] = y;
m1[0] = cell[cx1[0]][cy1[0]];
cx2[0] = x;
cy2[0] = y;
m2[0] = cell[cx2[0]][cy2[0]];
cx3[0] = x;
cy3[0] = y;
m3[0] = cell[cx3[0]][cy3[0]];
cx4[0] = x;
cy4[0] = y;
m4[0] = cell[cx4[0]][cy4[0]];
cx5[0] = x;
cy5[0] = y;
m5[0] = cell[cx5[0]][cy5[0]];
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_1(int x, int y, int num) {
cx1[0] = x;
cy1[0] = y;
m1[0] = cell[cx1[0]][cy1[0]];
cell[cx1[0]][cy1[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_2(int x, int y, int num) {
cx2[0] = x;
cy2[0] = y;
m2[0] = cell[cx2[0]][cy2[0]];
cell[cx2[0]][cy2[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_3(int x, int y, int num) {
cx3[0] = x;
cy3[0] = y;
m3[0] = cell[cx3[0]][cy3[0]];
cell[cx3[0]][cy3[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_4(int x, int y, int num) {
cx4[0] = x;
cy4[0] = y;
m4[0] = cell[cx4[0]][cy4[0]];
cell[cx4[0]][cy4[0]] = 4;
}
// injection_?(x,y,num) move_?() moveWithCuda_?() moveKernel_?() cx?[0],cy?[0]
void injection_5(int x, int y, int num) {
cx5[0] = x;
cy5[0] = y;
m5[0] = cell[cx5[0]][cy5[0]];
cell[cx5[0]][cy5[0]] = 4;
}
void setup(int x, int y, int m) {
int w = (m * x) + 2;
for (int i = (w - x); i < w; i++) {
for (int j = 2; j < y + 2; j++) {
cell[i][j] = (rand() % 2 + 2); // 2,3
}
}
}
void changeColor(GLfloat red, GLfloat green, GLfloat blue) {
r = red;
g = green;
b = blue;
}
//Check the status of an individual cell and apply the rules: 3 is cancer, 2 is a healthy cell, 4 is medicine
static int checkStatus(int status, int x, int y) {
int cancerNeighbours = 0;
int medicineNeighbours = 0;
for (int i = (x - 1); i < (x + 2); i++) {
if (cell[i][y - 1] == 3) {
cancerNeighbours++;
}
if (cell[i][y + 1] == 3) {
cancerNeighbours++;
}
}
if (cell[x - 1][y] == 3) {
cancerNeighbours++;
}
if (cell[x + 1][y] == 3) {
cancerNeighbours++;
}
for (int i = (x - 1); i < (x + 2); i++) {
if (cell[i][y - 1] == 4) {
medicineNeighbours++;
}
if (cell[i][y + 1] == 4) {
medicineNeighbours++;
}
}
if (cell[x - 1][y] == 4) {
medicineNeighbours++;
}
if (cell[x + 1][y] == 4) {
medicineNeighbours++;
}
if (status == 3 && medicineNeighbours >= 6) {
status = 2;
}
else if (status == 2 && cancerNeighbours >= 6) {
status = 3;
}
return status;
}
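// Added illustrative sketch (not part of the original program): a minimal
// host-side check of the rule described above. Surrounding a cancer cell (3)
// with eight medicine cells (4) satisfies the ">= 6 medicine neighbours"
// condition, so checkStatus should report the cell as healthy (2). The name
// selfTestCheckStatus is hypothetical and nothing in this file calls it.
static void selfTestCheckStatus() {
	int x = 10, y = 10;
	cell[x][y] = 3;                 // the cancer cell under test
	for (int i = x - 1; i <= x + 1; i++) {
		for (int j = y - 1; j <= y + 1; j++) {
			if (i != x || j != y) {
				cell[i][j] = 4;     // eight medicine neighbours
			}
		}
	}
	if (checkStatus(cell[x][y], x, y) != 2) {
		fprintf(stderr, "checkStatus rule check failed\n");
	}
}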
//Display individual pixels.
static void display()
{
glClear(GL_COLOR_BUFFER_BIT);
GLfloat red, green, blue;
for (int i = 5; i < (WIDTH - 5); i++) {
for (int j = 5; j < (HEIGHT - 5); j++) {
//Check the updated status of the current cell.
int cellV = checkStatus(cell[i][j], i, j);
if (cellV == 0) {
red = r;
green = 0.0f;
blue = 1.0;
cell[i][j] = 0;
}
else if (cellV == 2) {
red = r;
green = 0.4f;
blue = b;
cell[i][j] = 2;
}
else if (cellV == 3) {
red = 0.4f;
green = g;
blue = b;
cell[i][j] = 3;
}
else if (cellV == 4) {
red = 1.0f;
green = 1.0f;
blue = 0.0f;
cell[i][j] = 4;
}
glPointSize(1.0f);
glColor3f(red, green, blue);
glBegin(GL_POINTS);
glVertex2i(i, j);
glEnd();
}
}
glutSwapBuffers();
}
void update(int value) {
try {
//==test 1 ===
moveParallel();
//==test 2 ===
//move_1(); //injection_?()
//move_2(); //injection_?()
//move_3(); //injection_?()
//move_4(); //injection_?()
//move_5(); //injection_?()
//move_m5(); //injectionCuda()
//move5(); //injection5()
}
catch (...) {}
glutPostRedisplay();
glutTimerFunc(1000 / 30, update, 0);
}
int main(int argc, char** argv)
{
int x = 1020;
int y = 766;
int m = 1;
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(WIDTH, HEIGHT);
glutCreateWindow("Cell Growth Simulator");
init();
setup(x, y, m);
//===== test 1 =============
injectParallel(400, 100, 400, 200, 400, 300, 400, 400, 400, 500);
//======= test 3 ===========
/*injection5(400, 400, 5);
injectionCuda(500, 500, 5);*/
//======= test 2 ===========
/*injectionCuda(500, 500, 5);
injection_1(200, 300, 1);
injection_2(300, 300, 1);
injection_3(400, 300, 1);
injection_4(500, 300, 1);
injection_5(600, 300, 1);*/
glutDisplayFunc(display);
glutTimerFunc(1000 / 30, update, 0);
changeColor(r, g, b);
glutMainLoop();
return 0;
}
|
05c99dc613388fdfdd348ce781e75294c3f3b7e6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./common/helpers.h"
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
__global__ void kernel(int *a, int *b, int *c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
int tid1 = (tid + 1) % 256;
int tid2 = (tid + 2) % 256;
float aSum = (a[tid] + a[tid1] + a[tid2]) / 3.0f;
float bSum = (b[tid] + b[tid1] + b[tid2]) / 3.0f;
c[tid] = (aSum + bSum) / 2;
}
}
int main(void) {
hipEvent_t start, stop;
float elapsedTime;
hipStream_t stream_1;
hipStream_t stream_2;
hipStreamCreate(&stream_1);
hipStreamCreate(&stream_2);
int *host_a, *host_b, *host_c;
int *dev_a, *dev_b, *dev_c;
HANDLE_ERROR(hipMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, N * sizeof(int)));
HANDLE_ERROR(hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
HANDLE_ERROR(hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
HANDLE_ERROR(hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
for (int i = 0; i < FULL_DATA_SIZE; i += N) {
HANDLE_ERROR(hipMemcpyAsync(dev_a, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream_1));
HANDLE_ERROR(hipMemcpyAsync(dev_b, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream_1));
hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream_1, dev_a, dev_b, dev_c);
HANDLE_ERROR(hipMemcpyAsync(host_c + i, dev_c, N * sizeof(int), hipMemcpyDeviceToHost, stream_1));
}
HANDLE_ERROR(hipStreamSynchronize(stream_1));
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Time taken: %3.1f ms\n", elapsedTime);
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
HANDLE_ERROR(hipHostFree(host_a));
HANDLE_ERROR(hipHostFree(host_b));
HANDLE_ERROR(hipHostFree(host_c));
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_c));
HANDLE_ERROR(hipStreamDestroy(stream_1));
HANDLE_ERROR(hipStreamDestroy(stream_2));
return 0;
}
|
05c99dc613388fdfdd348ce781e75294c3f3b7e6.cu
|
#include "./common/helpers.h"
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
__global__ void kernel(int *a, int *b, int *c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < N) {
int tid1 = (tid + 1) % 256;
int tid2 = (tid + 2) % 256;
float aSum = (a[tid] + a[tid1] + a[tid2]) / 3.0f;
float bSum = (b[tid] + b[tid1] + b[tid2]) / 3.0f;
c[tid] = (aSum + bSum) / 2;
}
}
int main(void) {
cudaEvent_t start, stop;
float elapsedTime;
cudaStream_t stream_1;
cudaStream_t stream_2;
cudaStreamCreate(&stream_1);
cudaStreamCreate(&stream_2);
int *host_a, *host_b, *host_c;
int *dev_a, *dev_b, *dev_c;
HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c, N * sizeof(int)));
HANDLE_ERROR(cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
for (int i = 0; i < FULL_DATA_SIZE; i += N) {
HANDLE_ERROR(cudaMemcpyAsync(dev_a, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream_1));
HANDLE_ERROR(cudaMemcpyAsync(dev_b, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream_1));
kernel<<<N / 256, 256, 0, stream_1>>>(dev_a, dev_b, dev_c);
HANDLE_ERROR(cudaMemcpyAsync(host_c + i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream_1));
}
HANDLE_ERROR(cudaStreamSynchronize(stream_1));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Time taken: %3.1f ms\n", elapsedTime);
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
HANDLE_ERROR(cudaFreeHost(host_a));
HANDLE_ERROR(cudaFreeHost(host_b));
HANDLE_ERROR(cudaFreeHost(host_c));
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_c));
HANDLE_ERROR(cudaStreamDestroy(stream_1));
HANDLE_ERROR(cudaStreamDestroy(stream_2));
return 0;
}
|
71196178639d1d8709eb5fde2cfcbe0945472225.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ======================================================================== //
// Copyright 2018-2019 Ingo Wald //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include <optix_device.h>
#include "LaunchParams.h"
using namespace osc;
namespace osc {
/*! launch parameters in constant memory, filled in by optix upon
optixLaunch (this gets filled in from the buffer we pass to
optixLaunch) */
extern "C" __constant__ LaunchParams optixLaunchParams;
//------------------------------------------------------------------------------
// closest hit and anyhit programs for radiance-type rays.
//
// Note eventually we will have to create one pair of those for each
// ray type and each geometry type we want to render; but this
// simple example doesn't use any actual geometries yet, so we only
// create a single, dummy, set of them (we do have to have at least
// one group of them to set up the SBT)
//------------------------------------------------------------------------------
extern "C" __global__ void __closesthit__radiance()
{ /*! for this simple example, this will remain empty */ }
extern "C" __global__ void __anyhit__radiance()
{ /*! for this simple example, this will remain empty */ }
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
//
// as with the anyhit/closest hit programs, in this example we only
// need to have _some_ dummy function to set up a valid SBT
// ------------------------------------------------------------------------------
extern "C" __global__ void __miss__radiance()
{ /*! for this simple example, this will remain empty */ }
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__renderFrame()
{
if (optixLaunchParams.frameID == 0 &&
optixGetLaunchIndex().x == 0 &&
optixGetLaunchIndex().y == 0) {
// we could of course also have used optixGetLaunchDims to query
// the launch size, but accessing the optixLaunchParams here
// makes sure they're not getting optimized away (because
// otherwise they'd not get used)
printf("############################################\n");
printf("Hello world from OptiX 7 raygen program!\n(within a %ix%i-sized launch)\n",
optixLaunchParams.fbSize.x,
optixLaunchParams.fbSize.y);
printf("############################################\n");
}
// ------------------------------------------------------------------
// for this example, produce a simple test pattern:
// ------------------------------------------------------------------
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const int r = (ix % 256);
const int g = (iy % 256);
const int b = ((ix+iy) % 256);
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000
| (r<<0) | (g<<8) | (b<<16);
// and write to frame buffer ...
const uint32_t fbIndex = ix+iy*optixLaunchParams.fbSize.x;
optixLaunchParams.colorBuffer[fbIndex] = rgba;
}
} // ::osc
|
71196178639d1d8709eb5fde2cfcbe0945472225.cu
|
// ======================================================================== //
// Copyright 2018-2019 Ingo Wald //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include <optix_device.h>
#include "LaunchParams.h"
using namespace osc;
namespace osc {
/*! launch parameters in constant memory, filled in by optix upon
optixLaunch (this gets filled in from the buffer we pass to
optixLaunch) */
extern "C" __constant__ LaunchParams optixLaunchParams;
//------------------------------------------------------------------------------
// closest hit and anyhit programs for radiance-type rays.
//
// Note eventually we will have to create one pair of those for each
// ray type and each geometry type we want to render; but this
// simple example doesn't use any actual geometries yet, so we only
// create a single, dummy, set of them (we do have to have at least
// one group of them to set up the SBT)
//------------------------------------------------------------------------------
extern "C" __global__ void __closesthit__radiance()
{ /*! for this simple example, this will remain empty */ }
extern "C" __global__ void __anyhit__radiance()
{ /*! for this simple example, this will remain empty */ }
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
//
// as with the anyhit/closest hit programs, in this example we only
// need to have _some_ dummy function to set up a valid SBT
// ------------------------------------------------------------------------------
extern "C" __global__ void __miss__radiance()
{ /*! for this simple example, this will remain empty */ }
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__renderFrame()
{
if (optixLaunchParams.frameID == 0 &&
optixGetLaunchIndex().x == 0 &&
optixGetLaunchIndex().y == 0) {
// we could of course also have used optixGetLaunchDims to query
// the launch size, but accessing the optixLaunchParams here
// makes sure they're not getting optimized away (because
// otherwise they'd not get used)
printf("############################################\n");
printf("Hello world from OptiX 7 raygen program!\n(within a %ix%i-sized launch)\n",
optixLaunchParams.fbSize.x,
optixLaunchParams.fbSize.y);
printf("############################################\n");
}
// ------------------------------------------------------------------
// for this example, produce a simple test pattern:
// ------------------------------------------------------------------
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const int r = (ix % 256);
const int g = (iy % 256);
const int b = ((ix+iy) % 256);
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000
| (r<<0) | (g<<8) | (b<<16);
// and write to frame buffer ...
const uint32_t fbIndex = ix+iy*optixLaunchParams.fbSize.x;
optixLaunchParams.colorBuffer[fbIndex] = rgba;
}
} // ::osc
|
0336b4a51223476138cc55bfaeed342e63ea5ced.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include <stdint.h>
#include "sa_cuda_context.hpp"
#include "param.h"
#include "sa_blake.h"
#define WN PARAM_N
#define WK PARAM_K
#define COLLISION_BIT_LENGTH (WN / (WK+1))
#define COLLISION_BYTE_LENGTH ((COLLISION_BIT_LENGTH+7)/8)
#define FINAL_FULL_WIDTH (2*COLLISION_BYTE_LENGTH+sizeof(uint32_t)*(1 << (WK)))
#define NDIGITS (WK+1)
#define DIGITBITS (WN/(NDIGITS))
#define PROOFSIZE (1u<<WK)
#define COMPRESSED_PROOFSIZE ((COLLISION_BIT_LENGTH+1)*PROOFSIZE*4/(8*sizeof(uint32_t)))
typedef unsigned int uint;
typedef unsigned char uchar;
typedef unsigned long long ulong;
typedef unsigned short ushort;
typedef uint32_t u32;
typedef struct sols_s
{
uint nr;
uint likely_invalids;
uchar valid[MAX_SOLS];
uint values[MAX_SOLS][(1 << PARAM_K)];
} sols_t;
/*
** Assuming NR_ROWS_LOG == 16, the hash table slots have this layout (length in
** bytes in parens):
**
** round 0, table 0: cnt(4) i(4) pad(0) Xi(23.0) pad(1)
** round 1, table 1: cnt(4) i(4) pad(0.5) Xi(20.5) pad(3)
** round 2, table 0: cnt(4) i(4) i(4) pad(0) Xi(18.0) pad(2)
** round 3, table 1: cnt(4) i(4) i(4) pad(0.5) Xi(15.5) pad(4)
** round 4, table 0: cnt(4) i(4) i(4) i(4) pad(0) Xi(13.0) pad(3)
** round 5, table 1: cnt(4) i(4) i(4) i(4) pad(0.5) Xi(10.5) pad(5)
** round 6, table 0: cnt(4) i(4) i(4) i(4) i(4) pad(0) Xi( 8.0) pad(4)
** round 7, table 1: cnt(4) i(4) i(4) i(4) i(4) pad(0.5) Xi( 5.5) pad(6)
** round 8, table 0: cnt(4) i(4) i(4) i(4) i(4) i(4) pad(0) Xi( 3.0) pad(5)
**
** If the first byte of Xi is 0xAB then:
** - on even rounds, 'A' is part of the colliding PREFIX, 'B' is part of Xi
** - on odd rounds, 'A' and 'B' are both part of the colliding PREFIX, but
** 'A' is considered redundant padding as it was used to compute the row #
**
** - cnt is an atomic counter keeping track of the number of used slots.
** it is used in the first slot only; subsequent slots replace it with
** 4 padding bytes
** - i encodes either the 21-bit input value (round 0) or a reference to two
** inputs from the previous round
**
** Formula for Xi length and pad length above:
** > for i in range(9):
** > xi=(200-20*i-NR_ROWS_LOG)/8.; ci=8+4*((i)/2); print xi,32-ci-xi
**
** Note that the fractional .5-byte/4-bit padding following Xi for odd rounds
** is the 4 most significant bits of the last byte of Xi.
*/
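/*
** Added sketch (not part of the original solver): the Xi byte counts in the
** table above follow the Python one-liner quoted at the end of the comment.
** The hypothetical helper below restates that formula in C so the numbers can
** be reproduced programmatically; the constant 200 comes from the formula
** itself (the 200-bit Equihash hash assumed throughout this file).
*/
static __host__ __device__ double xi_len_bytes(int round)
{
	/* xi = (200 - 20*round - NR_ROWS_LOG) / 8 */
	return (200.0 - 20.0 * round - NR_ROWS_LOG) / 8.0;
}
/* e.g. with NR_ROWS_LOG == 16: xi_len_bytes(0) == 23.0, xi_len_bytes(1) == 20.5,
** ... xi_len_bytes(8) == 3.0, matching the Xi(...) column in the table above. */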
__constant__ ulong blake_iv[] =
{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
__device__ uint32_t rowCounter0[NR_ROWS];
__device__ uint32_t rowCounter1[NR_ROWS];
__device__ blake2b_state_t blake;
__device__ sols_t sols;
/*
** Reset counters in hash table.
*/
__global__
void kernel_init_ht0()
{
rowCounter0[blockIdx.x * blockDim.x + threadIdx.x] = 0;
}
__global__
void kernel_init_ht1()
{
rowCounter1[blockIdx.x * blockDim.x + threadIdx.x] = 0;
}
/*
** If xi0,xi1,xi2,xi3 are stored consecutively in little endian then they
** represent (hex notation, group of 5 hex digits are a group of PREFIX bits):
** aa aa ab bb bb cc cc cd dd... [round 0]
** --------------------
** ...ab bb bb cc cc cd dd... [odd round]
** --------------
** ...cc cc cd dd... [next even round]
** -----
** Bytes underlined are going to be stored in the slot. Preceding bytes
** (and possibly part of the underlined bytes, depending on NR_ROWS_LOG) are
** used to compute the row number.
**
** Round 0: xi0,xi1,xi2,xi3 is a 25-byte Xi (xi3: only the low byte matter)
** Round 1: xi0,xi1,xi2 is a 23-byte Xi (incl. the colliding PREFIX nibble)
** TODO: update lines below with padding nibbles
** Round 2: xi0,xi1,xi2 is a 20-byte Xi (xi2: only the low 4 bytes matter)
** Round 3: xi0,xi1,xi2 is a 17.5-byte Xi (xi2: only the low 1.5 bytes matter)
** Round 4: xi0,xi1 is a 15-byte Xi (xi1: only the low 7 bytes matter)
** Round 5: xi0,xi1 is a 12.5-byte Xi (xi1: only the low 4.5 bytes matter)
** Round 6: xi0,xi1 is a 10-byte Xi (xi1: only the low 2 bytes matter)
** Round 7: xi0 is a 7.5-byte Xi (xi0: only the low 7.5 bytes matter)
** Round 8: xi0 is a 5-byte Xi (xi0: only the low 5 bytes matter)
**
** Return 0 if successfully stored, or 1 if the row overflowed.
*/
__device__ uint ht_store(uint round, char *ht, uint i,
ulong xi0, ulong xi1, ulong xi2, ulong xi3, uint *rowCounters)
{
uint row;
char *p;
uint cnt;
#if NR_ROWS_LOG == 16
if (!(round & 1))
row = (xi0 & 0xffff);
else
// if we have in hex: "ab cd ef..." (little endian xi0) then this
// formula computes the row as 0xdebc. it skips the 'a' nibble as it
// is part of the PREFIX. The Xi will be stored starting with "ef...";
// 'e' will be considered padding and 'f' is part of the current PREFIX
row = ((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#elif NR_ROWS_LOG == 18
if (!(round & 1))
row = (xi0 & 0xffff) | ((xi0 & 0xc00000) >> 6);
else
row = ((xi0 & 0xc0000) >> 2) |
((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#elif NR_ROWS_LOG == 19
if (!(round & 1))
row = (xi0 & 0xffff) | ((xi0 & 0xe00000) >> 5);
else
row = ((xi0 & 0xe0000) >> 1) |
((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#elif NR_ROWS_LOG == 20
if (!(round & 1))
row = (xi0 & 0xffff) | ((xi0 & 0xf00000) >> 4);
else
row = ((xi0 & 0xf0000) >> 0) |
((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#else
#error "unsupported NR_ROWS_LOG"
#endif
xi0 = (xi0 >> 16) | (xi1 << (64 - 16));
xi1 = (xi1 >> 16) | (xi2 << (64 - 16));
xi2 = (xi2 >> 16) | (xi3 << (64 - 16));
p = ht + row * NR_SLOTS * SLOT_LEN;
uint xcnt = atomicAdd(&rowCounters[row], 1);
//printf("inc index %u round %u\n", rowIdx, round);
cnt = xcnt;
//printf("row %u rowOffset %u count is %u\n", rowIdx, rowOffset, cnt);
if (cnt >= NR_SLOTS) {
// avoid overflows
atomicSub(&rowCounters[row], 1);
return 1;
}
p += cnt * SLOT_LEN + xi_offset_for_round(round);
// store "i" (always 4 bytes before Xi)
*(uint *)(p - 4) = i;
if (round == 0 || round == 1)
{
// store 24 bytes
*(ulong *)(p + 0) = xi0;
*(ulong *)(p + 8) = xi1;
*(ulong *)(p + 16) = xi2;
}
else if (round == 2)
{
// store 20 bytes
*(uint *)(p + 0) = xi0;
*(ulong *)(p + 4) = (xi0 >> 32) | (xi1 << 32);
*(ulong *)(p + 12) = (xi1 >> 32) | (xi2 << 32);
}
else if (round == 3)
{
// store 16 bytes
*(uint *)(p + 0) = xi0;
*(ulong *)(p + 4) = (xi0 >> 32) | (xi1 << 32);
*(uint *)(p + 12) = (xi1 >> 32);
}
else if (round == 4)
{
// store 16 bytes
*(ulong *)(p + 0) = xi0;
*(ulong *)(p + 8) = xi1;
}
else if (round == 5)
{
// store 12 bytes
*(ulong *)(p + 0) = xi0;
*(uint *)(p + 8) = xi1;
}
else if (round == 6 || round == 7)
{
// store 8 bytes
*(uint *)(p + 0) = xi0;
*(uint *)(p + 4) = (xi0 >> 32);
}
else if (round == 8)
{
// store 4 bytes
*(uint *)(p + 0) = xi0;
}
return 0;
}
#define rotate(a, bits) ((a) << (bits)) | ((a) >> (64 - (bits)))
#define mix(va, vb, vc, vd, x, y) \
va = (va + vb + x); \
vd = rotate((vd ^ va), (ulong)64 - 32); \
vc = (vc + vd); \
vb = rotate((vb ^ vc), (ulong)64 - 24); \
va = (va + vb + y); \
vd = rotate((vd ^ va), (ulong)64 - 16); \
vc = (vc + vd); \
vb = rotate((vb ^ vc), (ulong)64 - 63);
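/*
** The mix() macro above is the BLAKE2b G function: rotate(x, 64 - n) is a
** 64-bit left-rotate by (64 - n), which equals a right-rotate by n, so the
** four rotations are the standard BLAKE2b amounts 32, 24, 16 and 63.
*/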
/*
** Execute round 0 (blake).
**
** Note: making the work group size less than or equal to the wavefront size
** allows the OpenCL compiler to remove the barrier() calls, see "2.2 Local
** Memory (LDS) Optimization 2-10" in:
** http://developer.amd.com/tools-and-sdks/opencl-zone/amd-accelerated-parallel-processing-app-sdk/opencl-optimization-guide/
*/
__global__
void kernel_round0(char *ht, uint *debug)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
ulong v[16];
uint inputs_per_thread = NR_INPUTS / (gridDim.x * blockDim.x);
uint input = tid * inputs_per_thread;
uint input_end = (tid + 1) * inputs_per_thread;
uint dropped = 0;
while (input < input_end)
{
//atomicAdd(&ran, 1);
// shift "i" to occupy the high 32 bits of the second ulong word in the
// message block
ulong word1 = (ulong)input << 32;
// init vector v
v[0] = blake.h[0];
v[1] = blake.h[1];
v[2] = blake.h[2];
v[3] = blake.h[3];
v[4] = blake.h[4];
v[5] = blake.h[5];
v[6] = blake.h[6];
v[7] = blake.h[7];
v[8] = blake_iv[0];
v[9] = blake_iv[1];
v[10] = blake_iv[2];
v[11] = blake_iv[3];
v[12] = blake_iv[4];
v[13] = blake_iv[5];
v[14] = blake_iv[6];
v[15] = blake_iv[7];
// mix in length of data
v[12] ^= ZCASH_BLOCK_HEADER_LEN + 4 /* length of "i" */;
// last block
v[14] ^= (ulong)-1;
// round 1
mix(v[0], v[4], v[8], v[12], 0, word1);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 2
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], word1, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 3
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, word1);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 4
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, word1);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 5
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, word1);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 6
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], word1, 0);
// round 7
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], word1, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 8
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, word1);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 9
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], word1, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 10
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], word1, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 11
mix(v[0], v[4], v[8], v[12], 0, word1);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 12
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], word1, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// compress v into the blake state; this produces the 50-byte hash
// (two Xi values)
ulong h[7];
h[0] = blake.h[0] ^ v[0] ^ v[8];
h[1] = blake.h[1] ^ v[1] ^ v[9];
h[2] = blake.h[2] ^ v[2] ^ v[10];
h[3] = blake.h[3] ^ v[3] ^ v[11];
h[4] = blake.h[4] ^ v[4] ^ v[12];
h[5] = blake.h[5] ^ v[5] ^ v[13];
h[6] = (blake.h[6] ^ v[6] ^ v[14]) & 0xffff;
// store the two Xi values in the hash table
#if ZCASH_HASH_LEN == 50
dropped += ht_store(0, ht, input * 2,
h[0],
h[1],
h[2],
h[3], rowCounter0);
dropped += ht_store(0, ht, input * 2 + 1,
(h[3] >> 8) | (h[4] << (64 - 8)),
(h[4] >> 8) | (h[5] << (64 - 8)),
(h[5] >> 8) | (h[6] << (64 - 8)),
(h[6] >> 8), rowCounter0);
#else
#error "unsupported ZCASH_HASH_LEN"
#endif
input++;
}
#ifdef ENABLE_DEBUG
debug[tid * 2] = 0;
debug[tid * 2 + 1] = dropped;
#endif
}
#if NR_ROWS_LOG <= 16 && NR_SLOTS <= (1 << 8)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 16) | ((slot1 & 0xff) << 8) | (slot0 & 0xff))
#define DECODE_ROW(REF) (REF >> 16)
#define DECODE_SLOT1(REF) ((REF >> 8) & 0xff)
#define DECODE_SLOT0(REF) (REF & 0xff)
#elif NR_ROWS_LOG == 18 && NR_SLOTS <= (1 << 7)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 14) | ((slot1 & 0x7f) << 7) | (slot0 & 0x7f))
#define DECODE_ROW(REF) (REF >> 14)
#define DECODE_SLOT1(REF) ((REF >> 7) & 0x7f)
#define DECODE_SLOT0(REF) (REF & 0x7f)
#elif NR_ROWS_LOG == 19 && NR_SLOTS <= (1 << 6)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 13) | ((slot1 & 0x3f) << 6) | (slot0 & 0x3f)) /* 1 spare bit */
#define DECODE_ROW(REF) (REF >> 13)
#define DECODE_SLOT1(REF) ((REF >> 6) & 0x3f)
#define DECODE_SLOT0(REF) (REF & 0x3f)
#elif NR_ROWS_LOG == 20 && NR_SLOTS <= (1 << 6)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 12) | ((slot1 & 0x3f) << 6) | (slot0 & 0x3f))
#define DECODE_ROW(REF) (REF >> 12)
#define DECODE_SLOT1(REF) ((REF >> 6) & 0x3f)
#define DECODE_SLOT0(REF) (REF & 0x3f)
#else
#error "unsupported NR_ROWS_LOG"
#endif
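/*
** Example round-trip for NR_ROWS_LOG == 16 (the other cases follow the same
** pattern with narrower slot fields): ENCODE_INPUTS(0x1234, 0x05, 0x0a)
** yields 0x12340a05, and DECODE_ROW/DECODE_SLOT1/DECODE_SLOT0 recover
** 0x1234, 0x0a and 0x05 respectively. The row occupies the high bits and the
** two slot indices the low bits of the 32-bit reference.
*/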
/*
** Access a half-aligned long, that is a long aligned on a 4-byte boundary.
*/
__device__ ulong half_aligned_long(ulong *p, uint offset)
{
return
(((ulong)*(uint *)((char *)p + offset + 0)) << 0) |
(((ulong)*(uint *)((char *)p + offset + 4)) << 32);
}
/*
** Access a well-aligned int.
*/
__device__ uint well_aligned_int(ulong *_p, uint offset)
{
char *p = (char *)_p;
return *(uint *)(p + offset);
}
/*
** XOR a pair of Xi values computed at "round - 1" and store the result in the
** hash table being built for "round". Note that when building the table for
** even rounds we need to skip 1 padding byte present in the "round - 1" table
** (the "0xAB" byte mentioned in the description at the top of this file.) But
** also note we can't load data directly past this byte because this would
** cause an unaligned memory access which is undefined per the OpenCL spec.
**
** Return 0 if successfully stored, or 1 if the row overflowed.
*/
__device__ uint xor_and_store(uint round, char *ht_dst, uint row,
uint slot_a, uint slot_b, ulong *a, ulong *b,
uint *rowCounters)
{
ulong xi0, xi1, xi2;
#if NR_ROWS_LOG >= 16 && NR_ROWS_LOG <= 20
// Note: for NR_ROWS_LOG == 20, for odd rounds, we could optimize by not
// storing the byte containing bits from the previous PREFIX block for
if (round == 1 || round == 2)
{
// xor 24 bytes
xi0 = *(a++) ^ *(b++);
xi1 = *(a++) ^ *(b++);
xi2 = *a ^ *b;
if (round == 2)
{
// skip padding byte
xi0 = (xi0 >> 8) | (xi1 << (64 - 8));
xi1 = (xi1 >> 8) | (xi2 << (64 - 8));
xi2 = (xi2 >> 8);
}
}
else if (round == 3)
{
// xor 20 bytes
xi0 = half_aligned_long(a, 0) ^ half_aligned_long(b, 0);
xi1 = half_aligned_long(a, 8) ^ half_aligned_long(b, 8);
xi2 = well_aligned_int(a, 16) ^ well_aligned_int(b, 16);
}
else if (round == 4 || round == 5)
{
// xor 16 bytes
xi0 = half_aligned_long(a, 0) ^ half_aligned_long(b, 0);
xi1 = half_aligned_long(a, 8) ^ half_aligned_long(b, 8);
xi2 = 0;
if (round == 4)
{
// skip padding byte
xi0 = (xi0 >> 8) | (xi1 << (64 - 8));
xi1 = (xi1 >> 8);
}
}
else if (round == 6)
{
// xor 12 bytes
xi0 = *a++ ^ *b++;
xi1 = *(uint *)a ^ *(uint *)b;
xi2 = 0;
if (round == 6)
{
// skip padding byte
xi0 = (xi0 >> 8) | (xi1 << (64 - 8));
xi1 = (xi1 >> 8);
}
}
else if (round == 7 || round == 8)
{
// xor 8 bytes
xi0 = half_aligned_long(a, 0) ^ half_aligned_long(b, 0);
xi1 = 0;
xi2 = 0;
if (round == 8)
{
// skip padding byte
xi0 = (xi0 >> 8);
}
}
	// invalid solutions (which start happening in round 5) have duplicate
// inputs and xor to zero, so discard them
if (!xi0 && !xi1)
return 0;
#else
#error "unsupported NR_ROWS_LOG"
#endif
return ht_store(round, ht_dst, ENCODE_INPUTS(row, slot_a, slot_b),
xi0, xi1, xi2, 0, rowCounters);
}
__device__ void equihash_round_cm3(uint round,
char *ht_src,
char *ht_dst,
uint *rowCountersSrc,
uint *rowCountersDst)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
char *p;
uint cnt;
uint i, j;
uint dropped_stor = 0;
ulong *a, *b;
uint xi_offset;
static uint size = NR_ROWS;
static uint stride = NR_SLOTS * SLOT_LEN;
xi_offset = (8 + ((round - 1) / 2) * 4);
for (uint ii = tid; ii < size; ii += (blockDim.x * gridDim.x)) {
p = ht_src + ii * stride;
cnt = rowCountersSrc[ii];
cnt = min(cnt, (uint)NR_SLOTS); // handle possible overflow in prev. round
if (!cnt) {// no elements in row, no collisions
continue;
}
// find collisions
for (i = 0; i < cnt; i++) {
for (j = i + 1; j < cnt; j++)
{
a = (ulong *)
(ht_src + ii * stride + i * 32 + xi_offset);
b = (ulong *)
(ht_src + ii * stride + j * 32 + xi_offset);
dropped_stor += xor_and_store(round, ht_dst, ii, i, j, a, b, rowCountersDst);
}
}
//if (round < 8) {
// reset the counter in preparation of the next round
//rowCountersSrc[ii] = 0;//might be doing this already
//*(uint *)(ht_src + ii * ((1 << (((200 / (9 + 1)) + 1) - 20)) * 6) * 32) = 0;
//}
}
}
/*
** Execute one Equihash round. Read from ht_src, XOR colliding pairs of Xi,
** store them in ht_dst.
*/
__device__ void equihash_round(uint round,
char *ht_src,
char *ht_dst,
uint *debug,
uint *rowCountersSrc,
uint *rowCountersDst)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
uint tlid = threadIdx.x;
__shared__ uchar first_words_data[(NR_SLOTS + 2) * 64];
__shared__ uint collisionsData[COLL_DATA_SIZE_PER_TH * 64];
__shared__ uint collisionsNum;
char *p;
uint cnt;
uchar *first_words = &first_words_data[(NR_SLOTS + 2)*tlid];
uchar mask;
uint i, j;
// NR_SLOTS is already oversized (by a factor of OVERHEAD), but we want to
// make it even larger
uint n;
uint dropped_coll = 0;
uint dropped_stor = 0;
ulong *a, *b;
uint xi_offset;
// read first words of Xi from the previous (round - 1) hash table
xi_offset = xi_offset_for_round(round - 1);
// the mask is also computed to read data from the previous round
#if NR_ROWS_LOG == 16
mask = ((!(round & 1)) ? 0x0f : 0xf0);
#elif NR_ROWS_LOG == 18
mask = ((!(round & 1)) ? 0x03 : 0x30);
#elif NR_ROWS_LOG == 19
mask = ((!(round & 1)) ? 0x01 : 0x10);
#elif NR_ROWS_LOG == 20
mask = 0; /* we can vastly simplify the code below */
#else
#error "unsupported NR_ROWS_LOG"
#endif
uint thCollNum = 0;
collisionsNum = 0;
__syncthreads();
p = (ht_src + tid * NR_SLOTS * SLOT_LEN);
cnt = rowCountersSrc[tid];
cnt = min(cnt, (uint)NR_SLOTS); // handle possible overflow in prev. round
if (!cnt) {
// no elements in row, no collisions
goto part2;
}
p += xi_offset;
for (i = 0; i < cnt; i++, p += SLOT_LEN)
first_words[i] = (*(uchar *)p) & mask;
// find collisions
for (i = 0; i < cnt - 1 && thCollNum < COLL_DATA_SIZE_PER_TH; i++)
{
uchar data_i = first_words[i];
uint collision = (tid << 10) | (i << 5) | (i + 1);
for (j = i + 1; (j + 4) < cnt;)
{
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
}
for (; j < cnt; j++)
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
}
}
part2:
__syncthreads();
uint totalCollisions = collisionsNum;
for (uint index = tlid; index < totalCollisions; index += blockDim.x) {
uint collision = collisionsData[index];
uint collisionThreadId = collision >> 10;
uint i = (collision >> 5) & 0x1F;
uint j = collision & 0x1F;
uchar *ptr = (uchar*)ht_src + collisionThreadId * NR_SLOTS * SLOT_LEN +
xi_offset;
a = (ulong *)(ptr + i * SLOT_LEN);
b = (ulong *)(ptr + j * SLOT_LEN);
dropped_stor += xor_and_store(round, ht_dst, collisionThreadId, i, j,
a, b, rowCountersDst);
}
}
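/*
** In equihash_round above, each collision candidate is packed into one 32-bit
** word as (tid << 10) | (slot_i << 5) | slot_j and unpacked in part2 with
** >> 10, >> 5 and & 0x1F, so this packing assumes at most 32 usable slots per
** row (5 bits per slot index).
*/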
/*
** This defines kernel_round1, kernel_round2, ..., kernel_round7.
*/
#define KERNEL_ROUND_ODD(N) \
__global__ \
void kernel_round ## N( char *ht_src, char *ht_dst, uint *debug) \
{ \
equihash_round(N, ht_src, ht_dst, debug, rowCounter0, rowCounter1); \
}
#define KERNEL_ROUND_EVEN(N) \
__global__ \
void kernel_round ## N( char *ht_src, char *ht_dst, uint *debug) \
{ \
equihash_round(N, ht_src, ht_dst, debug, rowCounter1, rowCounter0); \
}
#define KERNEL_ROUND_ODD_OLD(N) \
__global__ \
void kernel_round_cm3_ ## N( char *ht_src, char *ht_dst) \
{ \
equihash_round_cm3(N, ht_src, ht_dst, rowCounter0, rowCounter1); \
}
#define KERNEL_ROUND_EVEN_OLD(N) \
__global__ \
void kernel_round_cm3_ ## N(char *ht_src, char *ht_dst) \
{ \
equihash_round_cm3(N, ht_src, ht_dst, rowCounter1, rowCounter0); \
}
KERNEL_ROUND_ODD(1)
KERNEL_ROUND_EVEN(2)
KERNEL_ROUND_ODD(3)
KERNEL_ROUND_EVEN(4)
KERNEL_ROUND_ODD(5)
KERNEL_ROUND_EVEN(6)
KERNEL_ROUND_ODD(7)
KERNEL_ROUND_ODD_OLD(1)
KERNEL_ROUND_EVEN_OLD(2)
KERNEL_ROUND_ODD_OLD(3)
KERNEL_ROUND_EVEN_OLD(4)
KERNEL_ROUND_ODD_OLD(5)
KERNEL_ROUND_EVEN_OLD(6)
KERNEL_ROUND_ODD_OLD(7)
// kernel_round8 takes an extra argument, "sols"
__global__
void kernel_round8(char *ht_src, char *ht_dst, uint *debug)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
equihash_round(8, ht_src, ht_dst, debug, rowCounter1, rowCounter0);
if (!tid) {
sols.nr = sols.likely_invalids = 0;
}
}
__global__
void kernel_round_cm3_8(char *ht_src, char *ht_dst)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
equihash_round_cm3(8, ht_src, ht_dst, rowCounter1, rowCounter0);
if (!tid) {
sols.nr = sols.likely_invalids = 0;
}
}
__device__ uint expand_ref(char *ht, uint xi_offset, uint row, uint slot)
{
return *(uint *)(ht + row * NR_SLOTS * SLOT_LEN +
slot * SLOT_LEN + xi_offset - 4);
}
/*
** Expand references to inputs. Return 1 if so far the solution appears valid,
** or 0 otherwise (an invalid solution would be a solution with duplicate
** inputs, which can be detected at the last step: round == 0).
*/
__device__ uint expand_refs(uint *ins, uint nr_inputs, char **htabs,
uint round)
{
char *ht = htabs[round & 1];
uint i = nr_inputs - 1;
uint j = nr_inputs * 2 - 1;
uint xi_offset = xi_offset_for_round(round);
int dup_to_watch = -1;
do
{
ins[j] = expand_ref(ht, xi_offset,
DECODE_ROW(ins[i]), DECODE_SLOT1(ins[i]));
ins[j - 1] = expand_ref(ht, xi_offset,
DECODE_ROW(ins[i]), DECODE_SLOT0(ins[i]));
if (!round)
{
if (dup_to_watch == -1)
dup_to_watch = ins[j];
else if (ins[j] == dup_to_watch || ins[j - 1] == dup_to_watch)
return 0;
}
if (!i)
break;
i--;
j -= 2;
} while (1);
return 1;
}
/*
** Verify if a potential solution is in fact valid.
*/
__device__ void potential_sol(char **htabs, uint ref0, uint ref1)
{
uint nr_values;
uint values_tmp[(1 << PARAM_K)];
uint sol_i;
uint i;
nr_values = 0;
values_tmp[nr_values++] = ref0;
values_tmp[nr_values++] = ref1;
uint round = PARAM_K - 1;
do
{
round--;
if (!expand_refs(values_tmp, nr_values, htabs, round))
return;
nr_values *= 2;
} while (round > 0);
// solution appears valid, copy it to sols
sol_i = atomicAdd(&sols.nr, 1);
if (sol_i >= MAX_SOLS)
return;
for (i = 0; i < (1 << PARAM_K); i++)
sols.values[sol_i][i] = values_tmp[i];
sols.valid[sol_i] = 1;
}
/*
** Scan the hash tables to find Equihash solutions.
*/
__global__
void kernel_sols(char *ht0, char *ht1)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
char *htabs[2] = { ht0, ht1 };
uint ht_i = (PARAM_K - 1) & 1; // table filled at last round
uint cnt;
uint xi_offset = xi_offset_for_round(PARAM_K - 1);
uint i, j;
char *a, *b;
uint ref_i, ref_j;
// it's ok for the collisions array to be so small, as if it fills up
// the potential solutions are likely invalid (many duplicate inputs)
ulong collisions;
uint coll;
#if NR_ROWS_LOG >= 16 && NR_ROWS_LOG <= 20
// in the final hash table, we are looking for a match on both the bits
// part of the previous PREFIX colliding bits, and the last PREFIX bits.
uint mask = 0xffffff;
#else
#error "unsupported NR_ROWS_LOG"
#endif
a = htabs[ht_i] + tid * NR_SLOTS * SLOT_LEN;
cnt = rowCounter0[tid];
cnt = min(cnt, (uint)NR_SLOTS); // handle possible overflow in last round
coll = 0;
a += xi_offset;
for (i = 0; i < cnt; i++, a += SLOT_LEN) {
uint a_data = ((*(uint *)a) & mask);
ref_i = *(uint *)(a - 4);
for (j = i + 1, b = a + SLOT_LEN; j < cnt; j++, b += SLOT_LEN) {
if (a_data == ((*(uint *)b) & mask)) {
ref_j = *(uint *)(b - 4);
collisions = ((ulong)ref_i << 32) | ref_j;
goto exit1;
}
}
}
return;
exit1:
potential_sol(htabs, collisions >> 32, collisions & 0xffffffff);
}
struct __align__(64) c_context {
char* buf_ht[2], *buf_dbg;
//uint *rowCounters[2];
//sols_t *sols;
u32 nthreads;
size_t global_ws;
c_context(const u32 n_threads) {
nthreads = n_threads;
}
void* operator new(size_t i) {
return _mm_malloc(i, 64);
}
void operator delete(void* p) {
_mm_free(p);
}
};
static size_t select_work_size_blake(void)
{
size_t work_size =
64 * /* thread per wavefront */
BLAKE_WPS * /* wavefront per simd */
4 * /* simd per compute unit */
36;
// Make the work group size a multiple of the nr of wavefronts, while
// dividing the number of inputs. This results in the worksize being a
// power of 2.
while (NR_INPUTS % work_size)
work_size += 64;
//debug("Blake: work size %zd\n", work_size);
return work_size;
}
static void sort_pair(uint32_t *a, uint32_t len)
{
uint32_t *b = a + len;
uint32_t tmp, need_sorting = 0;
for (uint32_t i = 0; i < len; i++)
if (need_sorting || a[i] > b[i])
{
need_sorting = 1;
tmp = a[i];
a[i] = b[i];
b[i] = tmp;
}
else if (a[i] < b[i])
return;
}
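/*
** sort_pair() orders the two halves of a pair lexicographically by swapping
** element-wise once the first difference favours b. For example, with len == 2,
** a = {3, 1} and b = {2, 5}: 3 > 2 triggers the swap mode, giving a = {2, 5}
** and b = {3, 1}.
*/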
static uint32_t verify_sol(sols_t *sols, unsigned sol_i)
{
uint32_t *inputs = sols->values[sol_i];
uint32_t seen_len = (1 << (PREFIX + 1)) / 8;
uint8_t seen[(1 << (PREFIX + 1)) / 8];
uint32_t i;
uint8_t tmp;
// look for duplicate inputs
memset(seen, 0, seen_len);
for (i = 0; i < (1 << PARAM_K); i++)
{
tmp = seen[inputs[i] / 8];
seen[inputs[i] / 8] |= 1 << (inputs[i] & 7);
if (tmp == seen[inputs[i] / 8])
{
// at least one input value is a duplicate
sols->valid[sol_i] = 0;
return 0;
}
}
// the valid flag is already set by the GPU, but set it again because
// I plan to change the GPU code to not set it
sols->valid[sol_i] = 1;
// sort the pairs in place
for (uint32_t level = 0; level < PARAM_K; level++)
for (i = 0; i < (1 << PARAM_K); i += (2 << level))
sort_pair(&inputs[i], 1 << level);
return 1;
}
static void compress(uint8_t *out, uint32_t *inputs, uint32_t n)
{
uint32_t byte_pos = 0;
int32_t bits_left = PREFIX + 1;
uint8_t x = 0;
uint8_t x_bits_used = 0;
uint8_t *pOut = out;
while (byte_pos < n)
{
if (bits_left >= 8 - x_bits_used)
{
x |= inputs[byte_pos] >> (bits_left - 8 + x_bits_used);
bits_left -= 8 - x_bits_used;
x_bits_used = 8;
}
else if (bits_left > 0)
{
uint32_t mask = ~(-1 << (8 - x_bits_used));
mask = ((~mask) >> bits_left) & mask;
x |= (inputs[byte_pos] << (8 - x_bits_used - bits_left)) & mask;
x_bits_used += bits_left;
bits_left = 0;
}
else if (bits_left <= 0)
{
assert(!bits_left);
byte_pos++;
bits_left = PREFIX + 1;
}
if (x_bits_used == 8)
{
*pOut++ = x;
x = x_bits_used = 0;
}
}
}
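/*
** compress() packs n values of (PREFIX + 1) bits each into a byte stream.
** Assuming the standard Equihash(200,9) parameters (PREFIX == 20, PARAM_K == 9),
** the 512 solution indices occupy 512 * 21 bits = 1344 bytes, which matches
** the length passed to solutionf() in solve() below.
*/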
sa_cuda_context::sa_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
eq = new c_context(threadsperblock * totalblocks);
#ifdef ENABLE_DEBUG
size_t dbg_size = NR_ROWS;
#else
size_t dbg_size = 1;
#endif
checkCudaErrors(hipMalloc((void**)&eq->buf_dbg, dbg_size));
checkCudaErrors(hipMalloc((void**)&eq->buf_ht[0], HT_SIZE));
checkCudaErrors(hipMalloc((void**)&eq->buf_ht[1], HT_SIZE));
checkCudaErrors(hipDeviceSynchronize());
//eq->sols = (sols_t *)malloc(sizeof(sols_t));
}
sa_cuda_context::~sa_cuda_context()
{
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
delete eq;
}
#define CHECK_LAUNCH() \
checkCudaErrors(hipPeekAtLastError()); \
checkCudaErrors(hipDeviceSynchronize());
static inline void solve_new(c_context *miner, unsigned round)
{
constexpr uint32_t THREAD_SHIFT = 10;
constexpr uint32_t THREAD_COUNT = 1 << THREAD_SHIFT;
constexpr uint32_t DIM_SIZE = (1 << 20) >> THREAD_SHIFT;
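	// The fixed DIM_SIZE/THREAD_COUNT pair launches exactly 1 << 20 threads for
	// the kernel_init_ht* calls below, which appears to assume NR_ROWS == 1 << 20
	// (NR_ROWS_LOG == 20): fewer rows would make the init kernels write past the
	// rowCounter arrays, more rows would leave some counters uncleared.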
// Now on every round!!!!
switch (round) {
case 0:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round0 << <1024, 64 >> >(miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 1:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round1 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 2:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round2 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 3:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round3 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 4:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round4 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 5:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round5 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 6:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round6 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 7:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round7 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 8:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round8 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
}
}
static inline void solve_old(unsigned round, c_context *miner)
{
constexpr uint32_t THREAD_SHIFT = 10;
constexpr uint32_t THREAD_COUNT = 1 << THREAD_SHIFT;
constexpr uint32_t DIM_SIZE = (1 << 20) >> THREAD_SHIFT;
// Now on every round!!!!
switch (round) {
case 0:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round0 << <1024, 64 >> >(miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 1:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_1 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 2:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_2 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 3:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_3 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 4:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_4 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 5:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_5 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 6:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_6 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 7:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_7 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 8:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_8 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
}
}
#include <fstream>
void sa_cuda_context::solve(const char * tequihash_header, unsigned int tequihash_header_len, const char * nonce, unsigned int nonce_len, std::function<bool()> cancelf, std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf, std::function<void(void)> hashdonef)
{
checkCudaErrors(hipSetDevice(device_id));
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, device_id));
bool bUseOld = prop.major < 5;
unsigned char context[140];
memset(context, 0, 140);
memcpy(context, tequihash_header, tequihash_header_len);
memcpy(context + tequihash_header_len, nonce, nonce_len);
c_context *miner = eq;
//FUNCTION<<<totalblocks, threadsperblock>>>(ARGUMENTS)
blake2b_state_t initialCtx;
zcash_blake2b_init(&initialCtx, ZCASH_HASH_LEN, PARAM_N, PARAM_K);
zcash_blake2b_update(&initialCtx, (const uint8_t*)context, 128, 0);
checkCudaErrors(hipMemcpyToSymbol(blake, &initialCtx, sizeof(blake2b_state_s), 0, hipMemcpyHostToDevice));
for (unsigned round = 0; round < PARAM_K; round++) {
if (bUseOld) {
solve_old(round, miner);
} else {
solve_new(miner, round);
}
if (cancelf()) return;
}
kernel_sols << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[0], miner->buf_ht[1]);
sols_t l_sols;
checkCudaErrors(hipMemcpyFromSymbol(&l_sols, sols, sizeof(sols_t), 0, hipMemcpyDeviceToHost));
if (l_sols.nr > MAX_SOLS)
l_sols.nr = MAX_SOLS;
for (unsigned sol_i = 0; sol_i < l_sols.nr; sol_i++) {
verify_sol(&l_sols, sol_i);
}
uint8_t proof[COMPRESSED_PROOFSIZE * 2];
for (uint32_t i = 0; i < l_sols.nr; i++) {
if (l_sols.valid[i]) {
compress(proof, (uint32_t *)(l_sols.values[i]), 1 << PARAM_K);
solutionf(std::vector<uint32_t>(0), 1344, proof);
}
}
hashdonef();
}
|
0336b4a51223476138cc55bfaeed342e63ea5ced.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include <stdint.h>
#include "sa_cuda_context.hpp"
#include "param.h"
#include "sa_blake.h"
#define WN PARAM_N
#define WK PARAM_K
#define COLLISION_BIT_LENGTH (WN / (WK+1))
#define COLLISION_BYTE_LENGTH ((COLLISION_BIT_LENGTH+7)/8)
#define FINAL_FULL_WIDTH (2*COLLISION_BYTE_LENGTH+sizeof(uint32_t)*(1 << (WK)))
#define NDIGITS (WK+1)
#define DIGITBITS (WN/(NDIGITS))
#define PROOFSIZE (1u<<WK)
#define COMPRESSED_PROOFSIZE ((COLLISION_BIT_LENGTH+1)*PROOFSIZE*4/(8*sizeof(uint32_t)))
typedef unsigned int uint;
typedef unsigned char uchar;
typedef unsigned long long ulong;
typedef unsigned short ushort;
typedef uint32_t u32;
typedef struct sols_s
{
uint nr;
uint likely_invalids;
uchar valid[MAX_SOLS];
uint values[MAX_SOLS][(1 << PARAM_K)];
} sols_t;
/*
** Assuming NR_ROWS_LOG == 16, the hash table slots have this layout (length in
** bytes in parens):
**
** round 0, table 0: cnt(4) i(4) pad(0) Xi(23.0) pad(1)
** round 1, table 1: cnt(4) i(4) pad(0.5) Xi(20.5) pad(3)
** round 2, table 0: cnt(4) i(4) i(4) pad(0) Xi(18.0) pad(2)
** round 3, table 1: cnt(4) i(4) i(4) pad(0.5) Xi(15.5) pad(4)
** round 4, table 0: cnt(4) i(4) i(4) i(4) pad(0) Xi(13.0) pad(3)
** round 5, table 1: cnt(4) i(4) i(4) i(4) pad(0.5) Xi(10.5) pad(5)
** round 6, table 0: cnt(4) i(4) i(4) i(4) i(4) pad(0) Xi( 8.0) pad(4)
** round 7, table 1: cnt(4) i(4) i(4) i(4) i(4) pad(0.5) Xi( 5.5) pad(6)
** round 8, table 0: cnt(4) i(4) i(4) i(4) i(4) i(4) pad(0) Xi( 3.0) pad(5)
**
** If the first byte of Xi is 0xAB then:
** - on even rounds, 'A' is part of the colliding PREFIX, 'B' is part of Xi
** - on odd rounds, 'A' and 'B' are both part of the colliding PREFIX, but
** 'A' is considered redundant padding as it was used to compute the row #
**
** - cnt is an atomic counter keeping track of the number of used slots.
** it is used in the first slot only; subsequent slots replace it with
** 4 padding bytes
** - i encodes either the 21-bit input value (round 0) or a reference to two
** inputs from the previous round
**
** Formula for Xi length and pad length above:
** > for i in range(9):
** > xi=(200-20*i-NR_ROWS_LOG)/8.; ci=8+4*((i)/2); print xi,32-ci-xi
**
** Note that the fractional .5-byte/4-bit padding following Xi for odd rounds
** is the 4 most significant bits of the last byte of Xi.
*/
__constant__ ulong blake_iv[] =
{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
__device__ uint32_t rowCounter0[NR_ROWS];
__device__ uint32_t rowCounter1[NR_ROWS];
__device__ blake2b_state_t blake;
__device__ sols_t sols;
/*
** Reset counters in hash table.
*/
__global__
void kernel_init_ht0()
{
rowCounter0[blockIdx.x * blockDim.x + threadIdx.x] = 0;
}
__global__
void kernel_init_ht1()
{
rowCounter1[blockIdx.x * blockDim.x + threadIdx.x] = 0;
}
/*
** If xi0,xi1,xi2,xi3 are stored consecutively in little endian then they
** represent (hex notation, each group of 5 hex digits is a group of PREFIX bits):
** aa aa ab bb bb cc cc cd dd... [round 0]
** --------------------
** ...ab bb bb cc cc cd dd... [odd round]
** --------------
** ...cc cc cd dd... [next even round]
** -----
** Bytes underlined are going to be stored in the slot. Preceding bytes
** (and possibly part of the underlined bytes, depending on NR_ROWS_LOG) are
** used to compute the row number.
**
** Round 0: xi0,xi1,xi2,xi3 is a 25-byte Xi (xi3: only the low byte matters)
** Round 1: xi0,xi1,xi2 is a 23-byte Xi (incl. the colliding PREFIX nibble)
** TODO: update lines below with padding nibbles
** Round 2: xi0,xi1,xi2 is a 20-byte Xi (xi2: only the low 4 bytes matter)
** Round 3: xi0,xi1,xi2 is a 17.5-byte Xi (xi2: only the low 1.5 bytes matter)
** Round 4: xi0,xi1 is a 15-byte Xi (xi1: only the low 7 bytes matter)
** Round 5: xi0,xi1 is a 12.5-byte Xi (xi1: only the low 4.5 bytes matter)
** Round 6: xi0,xi1 is a 10-byte Xi (xi1: only the low 2 bytes matter)
** Round 7: xi0 is a 7.5-byte Xi (xi0: only the low 7.5 bytes matter)
** Round 8: xi0 is a 5-byte Xi (xi0: only the low 5 bytes matter)
**
** Return 0 if successfully stored, or 1 if the row overflowed.
*/
__device__ uint ht_store(uint round, char *ht, uint i,
ulong xi0, ulong xi1, ulong xi2, ulong xi3, uint *rowCounters)
{
uint row;
char *p;
uint cnt;
#if NR_ROWS_LOG == 16
if (!(round & 1))
row = (xi0 & 0xffff);
else
// if we have in hex: "ab cd ef..." (little endian xi0) then this
// formula computes the row as 0xdebc. it skips the 'a' nibble as it
// is part of the PREFIX. The Xi will be stored starting with "ef...";
// 'e' will be considered padding and 'f' is part of the current PREFIX
row = ((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#elif NR_ROWS_LOG == 18
if (!(round & 1))
row = (xi0 & 0xffff) | ((xi0 & 0xc00000) >> 6);
else
row = ((xi0 & 0xc0000) >> 2) |
((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#elif NR_ROWS_LOG == 19
if (!(round & 1))
row = (xi0 & 0xffff) | ((xi0 & 0xe00000) >> 5);
else
row = ((xi0 & 0xe0000) >> 1) |
((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#elif NR_ROWS_LOG == 20
if (!(round & 1))
row = (xi0 & 0xffff) | ((xi0 & 0xf00000) >> 4);
else
row = ((xi0 & 0xf0000) >> 0) |
((xi0 & 0xf00) << 4) | ((xi0 & 0xf00000) >> 12) |
((xi0 & 0xf) << 4) | ((xi0 & 0xf000) >> 12);
#else
#error "unsupported NR_ROWS_LOG"
#endif
xi0 = (xi0 >> 16) | (xi1 << (64 - 16));
xi1 = (xi1 >> 16) | (xi2 << (64 - 16));
xi2 = (xi2 >> 16) | (xi3 << (64 - 16));
p = ht + row * NR_SLOTS * SLOT_LEN;
uint xcnt = atomicAdd(&rowCounters[row], 1);
//printf("inc index %u round %u\n", rowIdx, round);
cnt = xcnt;
//printf("row %u rowOffset %u count is %u\n", rowIdx, rowOffset, cnt);
if (cnt >= NR_SLOTS) {
// avoid overflows
atomicSub(&rowCounters[row], 1);
return 1;
}
p += cnt * SLOT_LEN + xi_offset_for_round(round);
// store "i" (always 4 bytes before Xi)
*(uint *)(p - 4) = i;
if (round == 0 || round == 1)
{
// store 24 bytes
*(ulong *)(p + 0) = xi0;
*(ulong *)(p + 8) = xi1;
*(ulong *)(p + 16) = xi2;
}
else if (round == 2)
{
// store 20 bytes
*(uint *)(p + 0) = xi0;
*(ulong *)(p + 4) = (xi0 >> 32) | (xi1 << 32);
*(ulong *)(p + 12) = (xi1 >> 32) | (xi2 << 32);
}
else if (round == 3)
{
// store 16 bytes
*(uint *)(p + 0) = xi0;
*(ulong *)(p + 4) = (xi0 >> 32) | (xi1 << 32);
*(uint *)(p + 12) = (xi1 >> 32);
}
else if (round == 4)
{
// store 16 bytes
*(ulong *)(p + 0) = xi0;
*(ulong *)(p + 8) = xi1;
}
else if (round == 5)
{
// store 12 bytes
*(ulong *)(p + 0) = xi0;
*(uint *)(p + 8) = xi1;
}
else if (round == 6 || round == 7)
{
// store 8 bytes
*(uint *)(p + 0) = xi0;
*(uint *)(p + 4) = (xi0 >> 32);
}
else if (round == 8)
{
// store 4 bytes
*(uint *)(p + 0) = xi0;
}
return 0;
}
#define rotate(a, bits) ((a) << (bits)) | ((a) >> (64 - (bits)))
#define mix(va, vb, vc, vd, x, y) \
va = (va + vb + x); \
vd = rotate((vd ^ va), (ulong)64 - 32); \
vc = (vc + vd); \
vb = rotate((vb ^ vc), (ulong)64 - 24); \
va = (va + vb + y); \
vd = rotate((vd ^ va), (ulong)64 - 16); \
vc = (vc + vd); \
vb = rotate((vb ^ vc), (ulong)64 - 63);
/*
** Execute round 0 (blake).
**
** Note: making the work group size less than or equal to the wavefront size
** allows the OpenCL compiler to remove the barrier() calls, see "2.2 Local
** Memory (LDS) Optimization 2-10" in:
** http://developer.amd.com/tools-and-sdks/opencl-zone/amd-accelerated-parallel-processing-app-sdk/opencl-optimization-guide/
*/
__global__
void kernel_round0(char *ht, uint *debug)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
ulong v[16];
uint inputs_per_thread = NR_INPUTS / (gridDim.x * blockDim.x);
uint input = tid * inputs_per_thread;
uint input_end = (tid + 1) * inputs_per_thread;
uint dropped = 0;
while (input < input_end)
{
//atomicAdd(&ran, 1);
// shift "i" to occupy the high 32 bits of the second ulong word in the
// message block
ulong word1 = (ulong)input << 32;
// init vector v
v[0] = blake.h[0];
v[1] = blake.h[1];
v[2] = blake.h[2];
v[3] = blake.h[3];
v[4] = blake.h[4];
v[5] = blake.h[5];
v[6] = blake.h[6];
v[7] = blake.h[7];
v[8] = blake_iv[0];
v[9] = blake_iv[1];
v[10] = blake_iv[2];
v[11] = blake_iv[3];
v[12] = blake_iv[4];
v[13] = blake_iv[5];
v[14] = blake_iv[6];
v[15] = blake_iv[7];
// mix in length of data
v[12] ^= ZCASH_BLOCK_HEADER_LEN + 4 /* length of "i" */;
// last block
v[14] ^= (ulong)-1;
// round 1
mix(v[0], v[4], v[8], v[12], 0, word1);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 2
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], word1, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 3
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, word1);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 4
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, word1);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 5
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, word1);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 6
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], word1, 0);
// round 7
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], word1, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 8
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, word1);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 9
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], word1, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 10
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], word1, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 11
mix(v[0], v[4], v[8], v[12], 0, word1);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], 0, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// round 12
mix(v[0], v[4], v[8], v[12], 0, 0);
mix(v[1], v[5], v[9], v[13], 0, 0);
mix(v[2], v[6], v[10], v[14], 0, 0);
mix(v[3], v[7], v[11], v[15], 0, 0);
mix(v[0], v[5], v[10], v[15], word1, 0);
mix(v[1], v[6], v[11], v[12], 0, 0);
mix(v[2], v[7], v[8], v[13], 0, 0);
mix(v[3], v[4], v[9], v[14], 0, 0);
// compress v into the blake state; this produces the 50-byte hash
// (two Xi values)
ulong h[7];
h[0] = blake.h[0] ^ v[0] ^ v[8];
h[1] = blake.h[1] ^ v[1] ^ v[9];
h[2] = blake.h[2] ^ v[2] ^ v[10];
h[3] = blake.h[3] ^ v[3] ^ v[11];
h[4] = blake.h[4] ^ v[4] ^ v[12];
h[5] = blake.h[5] ^ v[5] ^ v[13];
h[6] = (blake.h[6] ^ v[6] ^ v[14]) & 0xffff;
// store the two Xi values in the hash table
#if ZCASH_HASH_LEN == 50
dropped += ht_store(0, ht, input * 2,
h[0],
h[1],
h[2],
h[3], rowCounter0);
dropped += ht_store(0, ht, input * 2 + 1,
(h[3] >> 8) | (h[4] << (64 - 8)),
(h[4] >> 8) | (h[5] << (64 - 8)),
(h[5] >> 8) | (h[6] << (64 - 8)),
(h[6] >> 8), rowCounter0);
#else
#error "unsupported ZCASH_HASH_LEN"
#endif
input++;
}
#ifdef ENABLE_DEBUG
debug[tid * 2] = 0;
debug[tid * 2 + 1] = dropped;
#endif
}
#if NR_ROWS_LOG <= 16 && NR_SLOTS <= (1 << 8)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 16) | ((slot1 & 0xff) << 8) | (slot0 & 0xff))
#define DECODE_ROW(REF) (REF >> 16)
#define DECODE_SLOT1(REF) ((REF >> 8) & 0xff)
#define DECODE_SLOT0(REF) (REF & 0xff)
#elif NR_ROWS_LOG == 18 && NR_SLOTS <= (1 << 7)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 14) | ((slot1 & 0x7f) << 7) | (slot0 & 0x7f))
#define DECODE_ROW(REF) (REF >> 14)
#define DECODE_SLOT1(REF) ((REF >> 7) & 0x7f)
#define DECODE_SLOT0(REF) (REF & 0x7f)
#elif NR_ROWS_LOG == 19 && NR_SLOTS <= (1 << 6)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 13) | ((slot1 & 0x3f) << 6) | (slot0 & 0x3f)) /* 1 spare bit */
#define DECODE_ROW(REF) (REF >> 13)
#define DECODE_SLOT1(REF) ((REF >> 6) & 0x3f)
#define DECODE_SLOT0(REF) (REF & 0x3f)
#elif NR_ROWS_LOG == 20 && NR_SLOTS <= (1 << 6)
#define ENCODE_INPUTS(row, slot0, slot1) \
((row << 12) | ((slot1 & 0x3f) << 6) | (slot0 & 0x3f))
#define DECODE_ROW(REF) (REF >> 12)
#define DECODE_SLOT1(REF) ((REF >> 6) & 0x3f)
#define DECODE_SLOT0(REF) (REF & 0x3f)
#else
#error "unsupported NR_ROWS_LOG"
#endif
/*
** Access a half-aligned long, that is a long aligned on a 4-byte boundary.
*/
__device__ ulong half_aligned_long(ulong *p, uint offset)
{
return
(((ulong)*(uint *)((char *)p + offset + 0)) << 0) |
(((ulong)*(uint *)((char *)p + offset + 4)) << 32);
}
/*
** Access a well-aligned int.
*/
__device__ uint well_aligned_int(ulong *_p, uint offset)
{
char *p = (char *)_p;
return *(uint *)(p + offset);
}
/*
** XOR a pair of Xi values computed at "round - 1" and store the result in the
** hash table being built for "round". Note that when building the table for
** even rounds we need to skip 1 padding byte present in the "round - 1" table
** (the "0xAB" byte mentioned in the description at the top of this file.) But
** also note we can't load data directly past this byte because this would
** cause an unaligned memory access which is undefined per the OpenCL spec.
**
** Return 0 if successfully stored, or 1 if the row overflowed.
*/
__device__ uint xor_and_store(uint round, char *ht_dst, uint row,
uint slot_a, uint slot_b, ulong *a, ulong *b,
uint *rowCounters)
{
ulong xi0, xi1, xi2;
#if NR_ROWS_LOG >= 16 && NR_ROWS_LOG <= 20
// Note: for NR_ROWS_LOG == 20, for odd rounds, we could optimize by not
// storing the byte containing bits from the previous PREFIX block for
if (round == 1 || round == 2)
{
// xor 24 bytes
xi0 = *(a++) ^ *(b++);
xi1 = *(a++) ^ *(b++);
xi2 = *a ^ *b;
if (round == 2)
{
// skip padding byte
xi0 = (xi0 >> 8) | (xi1 << (64 - 8));
xi1 = (xi1 >> 8) | (xi2 << (64 - 8));
xi2 = (xi2 >> 8);
}
}
else if (round == 3)
{
// xor 20 bytes
xi0 = half_aligned_long(a, 0) ^ half_aligned_long(b, 0);
xi1 = half_aligned_long(a, 8) ^ half_aligned_long(b, 8);
xi2 = well_aligned_int(a, 16) ^ well_aligned_int(b, 16);
}
else if (round == 4 || round == 5)
{
// xor 16 bytes
xi0 = half_aligned_long(a, 0) ^ half_aligned_long(b, 0);
xi1 = half_aligned_long(a, 8) ^ half_aligned_long(b, 8);
xi2 = 0;
if (round == 4)
{
// skip padding byte
xi0 = (xi0 >> 8) | (xi1 << (64 - 8));
xi1 = (xi1 >> 8);
}
}
else if (round == 6)
{
// xor 12 bytes
xi0 = *a++ ^ *b++;
xi1 = *(uint *)a ^ *(uint *)b;
xi2 = 0;
if (round == 6)
{
// skip padding byte
xi0 = (xi0 >> 8) | (xi1 << (64 - 8));
xi1 = (xi1 >> 8);
}
}
else if (round == 7 || round == 8)
{
// xor 8 bytes
xi0 = half_aligned_long(a, 0) ^ half_aligned_long(b, 0);
xi1 = 0;
xi2 = 0;
if (round == 8)
{
// skip padding byte
xi0 = (xi0 >> 8);
}
}
	// invalid solutions (which start happening in round 5) have duplicate
// inputs and xor to zero, so discard them
if (!xi0 && !xi1)
return 0;
#else
#error "unsupported NR_ROWS_LOG"
#endif
return ht_store(round, ht_dst, ENCODE_INPUTS(row, slot_a, slot_b),
xi0, xi1, xi2, 0, rowCounters);
}
__device__ void equihash_round_cm3(uint round,
char *ht_src,
char *ht_dst,
uint *rowCountersSrc,
uint *rowCountersDst)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
char *p;
uint cnt;
uint i, j;
uint dropped_stor = 0;
ulong *a, *b;
uint xi_offset;
static uint size = NR_ROWS;
static uint stride = NR_SLOTS * SLOT_LEN;
xi_offset = (8 + ((round - 1) / 2) * 4);
for (uint ii = tid; ii < size; ii += (blockDim.x * gridDim.x)) {
p = ht_src + ii * stride;
cnt = rowCountersSrc[ii];
cnt = min(cnt, (uint)NR_SLOTS); // handle possible overflow in prev. round
if (!cnt) {// no elements in row, no collisions
continue;
}
// find collisions
for (i = 0; i < cnt; i++) {
for (j = i + 1; j < cnt; j++)
{
a = (ulong *)
(ht_src + ii * stride + i * 32 + xi_offset);
b = (ulong *)
(ht_src + ii * stride + j * 32 + xi_offset);
dropped_stor += xor_and_store(round, ht_dst, ii, i, j, a, b, rowCountersDst);
}
}
//if (round < 8) {
// reset the counter in preparation of the next round
//rowCountersSrc[ii] = 0;//might be doing this already
//*(uint *)(ht_src + ii * ((1 << (((200 / (9 + 1)) + 1) - 20)) * 6) * 32) = 0;
//}
}
}
/*
** Execute one Equihash round. Read from ht_src, XOR colliding pairs of Xi,
** store them in ht_dst.
*/
__device__ void equihash_round(uint round,
char *ht_src,
char *ht_dst,
uint *debug,
uint *rowCountersSrc,
uint *rowCountersDst)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
uint tlid = threadIdx.x;
__shared__ uchar first_words_data[(NR_SLOTS + 2) * 64];
__shared__ uint collisionsData[COLL_DATA_SIZE_PER_TH * 64];
__shared__ uint collisionsNum;
char *p;
uint cnt;
uchar *first_words = &first_words_data[(NR_SLOTS + 2)*tlid];
uchar mask;
uint i, j;
// NR_SLOTS is already oversized (by a factor of OVERHEAD), but we want to
// make it even larger
uint n;
uint dropped_coll = 0;
uint dropped_stor = 0;
ulong *a, *b;
uint xi_offset;
// read first words of Xi from the previous (round - 1) hash table
xi_offset = xi_offset_for_round(round - 1);
// the mask is also computed to read data from the previous round
#if NR_ROWS_LOG == 16
mask = ((!(round & 1)) ? 0x0f : 0xf0);
#elif NR_ROWS_LOG == 18
mask = ((!(round & 1)) ? 0x03 : 0x30);
#elif NR_ROWS_LOG == 19
mask = ((!(round & 1)) ? 0x01 : 0x10);
#elif NR_ROWS_LOG == 20
mask = 0; /* we can vastly simplify the code below */
#else
#error "unsupported NR_ROWS_LOG"
#endif
uint thCollNum = 0;
collisionsNum = 0;
__syncthreads();
p = (ht_src + tid * NR_SLOTS * SLOT_LEN);
cnt = rowCountersSrc[tid];
cnt = min(cnt, (uint)NR_SLOTS); // handle possible overflow in prev. round
if (!cnt) {
// no elements in row, no collisions
goto part2;
}
p += xi_offset;
for (i = 0; i < cnt; i++, p += SLOT_LEN)
first_words[i] = (*(uchar *)p) & mask;
// find collisions
for (i = 0; i < cnt - 1 && thCollNum < COLL_DATA_SIZE_PER_TH; i++)
{
uchar data_i = first_words[i];
uint collision = (tid << 10) | (i << 5) | (i + 1);
for (j = i + 1; (j + 4) < cnt;)
{
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
j++;
}
}
for (; j < cnt; j++)
{
uint isColl = ((data_i == first_words[j]) ? 1 : 0);
if (isColl)
{
thCollNum++;
uint index = atomicAdd(&collisionsNum, 1);
collisionsData[index] = collision;
}
collision++;
}
}
part2:
__syncthreads();
uint totalCollisions = collisionsNum;
for (uint index = tlid; index < totalCollisions; index += blockDim.x) {
uint collision = collisionsData[index];
uint collisionThreadId = collision >> 10;
uint i = (collision >> 5) & 0x1F;
uint j = collision & 0x1F;
uchar *ptr = (uchar*)ht_src + collisionThreadId * NR_SLOTS * SLOT_LEN +
xi_offset;
a = (ulong *)(ptr + i * SLOT_LEN);
b = (ulong *)(ptr + j * SLOT_LEN);
dropped_stor += xor_and_store(round, ht_dst, collisionThreadId, i, j,
a, b, rowCountersDst);
}
}
/*
** This defines kernel_round1, kernel_round2, ..., kernel_round7.
*/
#define KERNEL_ROUND_ODD(N) \
__global__ \
void kernel_round ## N( char *ht_src, char *ht_dst, uint *debug) \
{ \
equihash_round(N, ht_src, ht_dst, debug, rowCounter0, rowCounter1); \
}
#define KERNEL_ROUND_EVEN(N) \
__global__ \
void kernel_round ## N( char *ht_src, char *ht_dst, uint *debug) \
{ \
equihash_round(N, ht_src, ht_dst, debug, rowCounter1, rowCounter0); \
}
#define KERNEL_ROUND_ODD_OLD(N) \
__global__ \
void kernel_round_cm3_ ## N( char *ht_src, char *ht_dst) \
{ \
equihash_round_cm3(N, ht_src, ht_dst, rowCounter0, rowCounter1); \
}
#define KERNEL_ROUND_EVEN_OLD(N) \
__global__ \
void kernel_round_cm3_ ## N(char *ht_src, char *ht_dst) \
{ \
equihash_round_cm3(N, ht_src, ht_dst, rowCounter1, rowCounter0); \
}
KERNEL_ROUND_ODD(1)
KERNEL_ROUND_EVEN(2)
KERNEL_ROUND_ODD(3)
KERNEL_ROUND_EVEN(4)
KERNEL_ROUND_ODD(5)
KERNEL_ROUND_EVEN(6)
KERNEL_ROUND_ODD(7)
KERNEL_ROUND_ODD_OLD(1)
KERNEL_ROUND_EVEN_OLD(2)
KERNEL_ROUND_ODD_OLD(3)
KERNEL_ROUND_EVEN_OLD(4)
KERNEL_ROUND_ODD_OLD(5)
KERNEL_ROUND_EVEN_OLD(6)
KERNEL_ROUND_ODD_OLD(7)
// kernel_round8 takes an extra argument, "sols"
__global__
void kernel_round8(char *ht_src, char *ht_dst, uint *debug)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
equihash_round(8, ht_src, ht_dst, debug, rowCounter1, rowCounter0);
if (!tid) {
sols.nr = sols.likely_invalids = 0;
}
}
__global__
void kernel_round_cm3_8(char *ht_src, char *ht_dst)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
equihash_round_cm3(8, ht_src, ht_dst, rowCounter1, rowCounter0);
if (!tid) {
sols.nr = sols.likely_invalids = 0;
}
}
__device__ uint expand_ref(char *ht, uint xi_offset, uint row, uint slot)
{
return *(uint *)(ht + row * NR_SLOTS * SLOT_LEN +
slot * SLOT_LEN + xi_offset - 4);
}
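/*
** expand_ref() reads the 4-byte reference that ht_store() wrote immediately
** before each Xi (at xi_offset - 4): the raw input index for round 0, or an
** ENCODE_INPUTS()-packed (row, slot, slot) triple for later rounds.
*/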
/*
** Expand references to inputs. Return 1 if so far the solution appears valid,
** or 0 otherwise (an invalid solution would be a solution with duplicate
** inputs, which can be detected at the last step: round == 0).
*/
__device__ uint expand_refs(uint *ins, uint nr_inputs, char **htabs,
uint round)
{
char *ht = htabs[round & 1];
uint i = nr_inputs - 1;
uint j = nr_inputs * 2 - 1;
uint xi_offset = xi_offset_for_round(round);
int dup_to_watch = -1;
do
{
ins[j] = expand_ref(ht, xi_offset,
DECODE_ROW(ins[i]), DECODE_SLOT1(ins[i]));
ins[j - 1] = expand_ref(ht, xi_offset,
DECODE_ROW(ins[i]), DECODE_SLOT0(ins[i]));
if (!round)
{
if (dup_to_watch == -1)
dup_to_watch = ins[j];
else if (ins[j] == dup_to_watch || ins[j - 1] == dup_to_watch)
return 0;
}
if (!i)
break;
i--;
j -= 2;
} while (1);
return 1;
}
/*
** Verify if a potential solution is in fact valid.
*/
__device__ void potential_sol(char **htabs, uint ref0, uint ref1)
{
uint nr_values;
uint values_tmp[(1 << PARAM_K)];
uint sol_i;
uint i;
nr_values = 0;
values_tmp[nr_values++] = ref0;
values_tmp[nr_values++] = ref1;
uint round = PARAM_K - 1;
do
{
round--;
if (!expand_refs(values_tmp, nr_values, htabs, round))
return;
nr_values *= 2;
} while (round > 0);
// solution appears valid, copy it to sols
sol_i = atomicAdd(&sols.nr, 1);
if (sol_i >= MAX_SOLS)
return;
for (i = 0; i < (1 << PARAM_K); i++)
sols.values[sol_i][i] = values_tmp[i];
sols.valid[sol_i] = 1;
}
/*
** Scan the hash tables to find Equihash solutions.
*/
__global__
void kernel_sols(char *ht0, char *ht1)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
char *htabs[2] = { ht0, ht1 };
uint ht_i = (PARAM_K - 1) & 1; // table filled at last round
uint cnt;
uint xi_offset = xi_offset_for_round(PARAM_K - 1);
uint i, j;
char *a, *b;
uint ref_i, ref_j;
// it's ok for the collisions array to be so small, as if it fills up
// the potential solutions are likely invalid (many duplicate inputs)
ulong collisions;
uint coll;
#if NR_ROWS_LOG >= 16 && NR_ROWS_LOG <= 20
// in the final hash table, we are looking for a match on both the bits
// part of the previous PREFIX colliding bits, and the last PREFIX bits.
uint mask = 0xffffff;
#else
#error "unsupported NR_ROWS_LOG"
#endif
a = htabs[ht_i] + tid * NR_SLOTS * SLOT_LEN;
cnt = rowCounter0[tid];
cnt = min(cnt, (uint)NR_SLOTS); // handle possible overflow in last round
coll = 0;
a += xi_offset;
for (i = 0; i < cnt; i++, a += SLOT_LEN) {
uint a_data = ((*(uint *)a) & mask);
ref_i = *(uint *)(a - 4);
for (j = i + 1, b = a + SLOT_LEN; j < cnt; j++, b += SLOT_LEN) {
if (a_data == ((*(uint *)b) & mask)) {
ref_j = *(uint *)(b - 4);
collisions = ((ulong)ref_i << 32) | ref_j;
goto exit1;
}
}
}
return;
exit1:
potential_sol(htabs, collisions >> 32, collisions & 0xffffffff);
}
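/*
** Note that kernel_sols keeps only the first colliding pair found in each row
** (the goto exits both loops), packed into a single 64-bit word as
** ((ulong)ref_i << 32) | ref_j, so each row contributes at most one candidate
** solution to potential_sol().
*/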
struct __align__(64) c_context {
char* buf_ht[2], *buf_dbg;
//uint *rowCounters[2];
//sols_t *sols;
u32 nthreads;
size_t global_ws;
c_context(const u32 n_threads) {
nthreads = n_threads;
}
void* operator new(size_t i) {
return _mm_malloc(i, 64);
}
void operator delete(void* p) {
_mm_free(p);
}
};
static size_t select_work_size_blake(void)
{
size_t work_size =
64 * /* thread per wavefront */
BLAKE_WPS * /* wavefront per simd */
4 * /* simd per compute unit */
36;
// Make the work group size a multiple of the nr of wavefronts, while
// dividing the number of inputs. This results in the worksize being a
// power of 2.
while (NR_INPUTS % work_size)
work_size += 64;
//debug("Blake: work size %zd\n", work_size);
return work_size;
}
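// Sort the two len-sized halves of a pair of sub-solutions in place so that
// the lexicographically smaller half comes first, giving solutions a
// canonical ordering.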
static void sort_pair(uint32_t *a, uint32_t len)
{
uint32_t *b = a + len;
uint32_t tmp, need_sorting = 0;
for (uint32_t i = 0; i < len; i++)
if (need_sorting || a[i] > b[i])
{
need_sorting = 1;
tmp = a[i];
a[i] = b[i];
b[i] = tmp;
}
else if (a[i] < b[i])
return;
}
static uint32_t verify_sol(sols_t *sols, unsigned sol_i)
{
uint32_t *inputs = sols->values[sol_i];
uint32_t seen_len = (1 << (PREFIX + 1)) / 8;
uint8_t seen[(1 << (PREFIX + 1)) / 8];
uint32_t i;
uint8_t tmp;
// look for duplicate inputs
memset(seen, 0, seen_len);
for (i = 0; i < (1 << PARAM_K); i++)
{
tmp = seen[inputs[i] / 8];
seen[inputs[i] / 8] |= 1 << (inputs[i] & 7);
if (tmp == seen[inputs[i] / 8])
{
// at least one input value is a duplicate
sols->valid[sol_i] = 0;
return 0;
}
}
// the valid flag is already set by the GPU, but set it again because
// I plan to change the GPU code to not set it
sols->valid[sol_i] = 1;
// sort the pairs in place
for (uint32_t level = 0; level < PARAM_K; level++)
for (i = 0; i < (1 << PARAM_K); i += (2 << level))
sort_pair(&inputs[i], 1 << level);
return 1;
}
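// Pack the n (PREFIX + 1)-bit input indices of a solution into the compact
// byte representation expected by the verifier. With the standard Zcash
// parameters assumed here (PARAM_N=200, PARAM_K=9, PREFIX=20) that is
// 512 * 21 / 8 = 1344 bytes, matching the length passed to solutionf() below.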
static void compress(uint8_t *out, uint32_t *inputs, uint32_t n)
{
uint32_t byte_pos = 0;
int32_t bits_left = PREFIX + 1;
uint8_t x = 0;
uint8_t x_bits_used = 0;
uint8_t *pOut = out;
while (byte_pos < n)
{
if (bits_left >= 8 - x_bits_used)
{
x |= inputs[byte_pos] >> (bits_left - 8 + x_bits_used);
bits_left -= 8 - x_bits_used;
x_bits_used = 8;
}
else if (bits_left > 0)
{
uint32_t mask = ~(-1 << (8 - x_bits_used));
mask = ((~mask) >> bits_left) & mask;
x |= (inputs[byte_pos] << (8 - x_bits_used - bits_left)) & mask;
x_bits_used += bits_left;
bits_left = 0;
}
else if (bits_left <= 0)
{
assert(!bits_left);
byte_pos++;
bits_left = PREFIX + 1;
}
if (x_bits_used == 8)
{
*pOut++ = x;
x = x_bits_used = 0;
}
}
}
sa_cuda_context::sa_cuda_context(int tpb, int blocks, int id)
: threadsperblock(tpb), totalblocks(blocks), device_id(id)
{
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
eq = new c_context(threadsperblock * totalblocks);
#ifdef ENABLE_DEBUG
size_t dbg_size = NR_ROWS;
#else
size_t dbg_size = 1;
#endif
checkCudaErrors(cudaMalloc((void**)&eq->buf_dbg, dbg_size));
checkCudaErrors(cudaMalloc((void**)&eq->buf_ht[0], HT_SIZE));
checkCudaErrors(cudaMalloc((void**)&eq->buf_ht[1], HT_SIZE));
checkCudaErrors(cudaDeviceSynchronize());
//eq->sols = (sols_t *)malloc(sizeof(sols_t));
}
sa_cuda_context::~sa_cuda_context()
{
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
delete eq;
}
#define CHECK_LAUNCH() \
checkCudaErrors(cudaPeekAtLastError()); \
checkCudaErrors(cudaDeviceSynchronize());
static inline void solve_new(c_context *miner, unsigned round)
{
constexpr uint32_t THREAD_SHIFT = 10;
constexpr uint32_t THREAD_COUNT = 1 << THREAD_SHIFT;
constexpr uint32_t DIM_SIZE = (1 << 20) >> THREAD_SHIFT;
// the destination hash table is now re-initialized on every round
switch (round) {
case 0:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round0 << <1024, 64 >> >(miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 1:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round1 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 2:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round2 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 3:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round3 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 4:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round4 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 5:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round5 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 6:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round6 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 7:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round7 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 8:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round8 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
}
}
static inline void solve_old(unsigned round, c_context *miner)
{
constexpr uint32_t THREAD_SHIFT = 10;
constexpr uint32_t THREAD_COUNT = 1 << THREAD_SHIFT;
constexpr uint32_t DIM_SIZE = (1 << 20) >> THREAD_SHIFT;
// the destination hash table is now re-initialized on every round
switch (round) {
case 0:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round0 << <1024, 64 >> >(miner->buf_ht[round & 1], (uint*)miner->buf_dbg);
break;
case 1:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_1 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 2:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_2 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 3:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_3 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 4:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_4 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 5:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_5 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 6:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_6 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 7:
kernel_init_ht1 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_7 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
case 8:
kernel_init_ht0 << <DIM_SIZE, THREAD_COUNT >> > ();
kernel_round_cm3_8 << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[(round - 1) & 1], miner->buf_ht[round & 1]);
break;
}
}
#include <fstream>
void sa_cuda_context::solve(const char * tequihash_header, unsigned int tequihash_header_len, const char * nonce, unsigned int nonce_len, std::function<bool()> cancelf, std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf, std::function<void(void)> hashdonef)
{
checkCudaErrors(cudaSetDevice(device_id));
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, device_id));
bool bUseOld = prop.major < 5;
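// devices with compute capability < 5.0 take the *_cm3_* kernel path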
unsigned char context[140];
memset(context, 0, 140);
memcpy(context, tequihash_header, tequihash_header_len);
memcpy(context + tequihash_header_len, nonce, nonce_len);
c_context *miner = eq;
//FUNCTION<<<totalblocks, threadsperblock>>>(ARGUMENTS)
blake2b_state_t initialCtx;
zcash_blake2b_init(&initialCtx, ZCASH_HASH_LEN, PARAM_N, PARAM_K);
zcash_blake2b_update(&initialCtx, (const uint8_t*)context, 128, 0);
checkCudaErrors(cudaMemcpyToSymbol(blake, &initialCtx, sizeof(blake2b_state_s), 0, cudaMemcpyHostToDevice));
for (unsigned round = 0; round < PARAM_K; round++) {
if (bUseOld) {
solve_old(round, miner);
} else {
solve_new(miner, round);
}
if (cancelf()) return;
}
kernel_sols << <NR_ROWS >> 6, 64 >> >(miner->buf_ht[0], miner->buf_ht[1]);
sols_t l_sols;
checkCudaErrors(cudaMemcpyFromSymbol(&l_sols, sols, sizeof(sols_t), 0, cudaMemcpyDeviceToHost));
if (l_sols.nr > MAX_SOLS)
l_sols.nr = MAX_SOLS;
for (unsigned sol_i = 0; sol_i < l_sols.nr; sol_i++) {
verify_sol(&l_sols, sol_i);
}
uint8_t proof[COMPRESSED_PROOFSIZE * 2];
for (uint32_t i = 0; i < l_sols.nr; i++) {
if (l_sols.valid[i]) {
compress(proof, (uint32_t *)(l_sols.values[i]), 1 << PARAM_K);
solutionf(std::vector<uint32_t>(0), 1344, proof);
}
}
hashdonef();
}
|
a6499c6f06065c8038c108edc6355bc4edbd9221.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_louvain.h"
#include <algorithm>
#include <stdint.h>
#include <string.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include "utils.h"
#define BLOCKS 80
#define THREADS_PER_BLOCK 1024
#define EPS (1e-12)
uint32_t ARRAY_SIZE = 1LL << 28;
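// Capacity of the open-addressing hash table used per optimisation pass; it
// is shrunk in gpu_louvain() to at most about 10x the edge count and must
// remain a power of two because arr_hash() masks positions with (SIZE - 1).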
__global__ void
prepare_data_structures_kernel(int N, int E, Edge* edges, int* c, float* k, int* nodes_comm, int* new_nodes_comm)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < N; i += num_threads)
{
c[i] = i;
}
for (int i = tid; i < E; i += num_threads)
{
atomicAdd(&k[edges[i].dst], edges[i].weight);
}
for (int i = tid; i < N; i += num_threads)
{
nodes_comm[i] = 1;
}
for (int i = tid; i < N; i += num_threads)
{
new_nodes_comm[i] = 1;
}
}
__host__ void prepare_data_structures(int N, int E, Edge* edges, float* k, int* nodes_comm, int* new_nodes_comm, int* c, float* ac)
{
CUDA_CHECK(hipMemset(k, '\0', N * sizeof(float)));
hipLaunchKernelGGL(( prepare_data_structures_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, E, edges, c, k, nodes_comm, new_nodes_comm);
CUDA_CHECK(hipMemcpy(ac, k, N * sizeof(float), hipMemcpyDeviceToDevice));
}
__device__ uint32_t arr_hash(uint64_t key, int seed, uint64_t N, uint32_t SIZE)
{
uint64_t l = N * N * seed + key - 1;
l = (l >> 32) * 1605375019ULL + (l & 0xffffffffULL) * 553437317ULL + 3471094223ULL;
l = (l >> 32) * 2769702083ULL + (l & 0xffffffffULL) * 3924398899ULL + 2998053229ULL;
return l & (SIZE - 1);
}
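// Lock-free lookup/insert: probe positions generated by arr_hash() with an
// increasing seed, claiming an empty slot for `key` via atomicCAS; returns
// the slot index owned by `key`.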
__device__ uint32_t getpos(uint64_t* owner, uint64_t key, int N, uint32_t SIZE)
{
for (int it = 0; ; ++it)
{
uint32_t pos = arr_hash(key, it, N, SIZE);
if (owner[pos] == key)
{
return pos;
}
else if (owner[pos] == 0)
{
if (atomicCAS((unsigned long long*)&owner[pos], (unsigned long long)(0), (unsigned long long)key) == 0)
{
return pos;
}
else if (owner[pos] == key)
{
return pos;
}
}
}
}
__global__ void compute_changes_kernel(int N, int E, float* changes, uint64_t* owner, Edge* edges, int* c, uint32_t SIZE)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int e = tid; e < E; e += num_threads)
{
int vertex = edges[e].src;
if (edges[e].dst != vertex)
{
uint64_t key = (uint64_t)N * vertex + c[edges[e].dst] + 1;
uint32_t pos = getpos(owner, key, N, SIZE);
atomicAdd(&changes[pos], edges[e].weight);
}
}
}
union Magic
{
unsigned long long encoded;
struct {
int comm;
float change;
} decoded;
};
static_assert(sizeof(Magic) == 8, "too much magic");
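// Magic packs a candidate community and the corresponding modularity gain
// into a single 64-bit word so the best move per vertex can be updated with
// one atomicCAS.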
__global__ void prepare_magic_kernel(int N, Magic* magic, int* c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = tid; v < N; v += num_threads)
{
magic[v].decoded.comm = c[v];
magic[v].decoded.change = 0;
}
}
__global__ void modularity_optimisation_kernel(int N, int E, Edge* edges, int* c, float* k, int* nodes_comm, int* new_nodes_comm, float* ac, float m, float* changes, uint64_t* owner, Magic* magic, uint32_t SIZE)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int e = tid; e < E; e += num_threads)
{
int i = c[edges[e].dst];
int vertex = edges[e].src;
if (nodes_comm[c[vertex]] <= 1 && nodes_comm[i] <= 1 && i >= c[vertex])
{
continue;
}
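// Modularity gain of moving `vertex` into community i: (edges to i minus
// edges to its current community) / m plus the degree term
// k_v * ((a_old - k_v) - a_new) / (2 m^2).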
uint32_t pos1 = getpos(owner, (uint64_t)N * vertex + i + 1, N, SIZE);
uint32_t pos2 = getpos(owner, (uint64_t)N * vertex + c[vertex] + 1, N, SIZE);
float change = (changes[pos1] - changes[pos2]) / m + k[vertex] * ((ac[c[vertex]] - k[vertex]) - ac[i]) / (2 * m * m);
if (change < EPS)
{
continue;
}
Magic new_magic;
new_magic.decoded.comm = i;
new_magic.decoded.change = change;
while (true)
{
Magic local_magic = magic[vertex];
if ((change > local_magic.decoded.change ||
(fabs(change - local_magic.decoded.change) < EPS && i < local_magic.decoded.comm)))
{
if (atomicCAS((unsigned long long*)(magic + vertex),
local_magic.encoded, new_magic.encoded)
== local_magic.encoded)
{
atomicAdd(new_nodes_comm + i, 1);
atomicAdd(new_nodes_comm + local_magic.decoded.comm, -1);
break;
}
}
else break;
}
}
}
__global__ void update_ac_kernel(int N, float* ac, int* c, float* k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = tid; v < N; v += num_threads)
{
atomicAdd(&ac[c[v]], k[v]);
}
}
__global__ void compute_new_c_kernel(int N, Magic* magic, int* new_c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = tid; v < N; v += num_threads)
{
new_c[v] = magic[v].decoded.comm;
}
}
__host__ void modularity_optimisation(int N, int E, Edge* edges, int* c, float* k, int* new_c, int* nodes_comm, int* new_nodes_comm, float* ac, float m, float* changes, uint64_t* owner, Magic* magic)
{
CUDA_CHECK(hipMemset(owner, '\0', sizeof(uint64_t) * ARRAY_SIZE)); // owner stores 64-bit keys, so clear the full buffer
CUDA_CHECK(hipMemset(changes, '\0', sizeof(float) * ARRAY_SIZE));
hipLaunchKernelGGL(( compute_changes_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, E, changes, owner, edges, c, ARRAY_SIZE);
hipLaunchKernelGGL(( prepare_magic_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, magic, c);
hipLaunchKernelGGL(( modularity_optimisation_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, E, edges, c, k, nodes_comm, new_nodes_comm, ac, m, changes, owner, magic, ARRAY_SIZE);
hipLaunchKernelGGL(( compute_new_c_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, magic, new_c);
CUDA_CHECK(hipMemset(ac, '\0', N * sizeof(float)));
hipLaunchKernelGGL(( update_ac_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, ac, new_c, k);
}
__global__ void compute_modularity_kernel(int N, int E, Edge* edges, int* c, float* changes, float* ac, float* ac_helper)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < E; i+= num_threads)
{
if (c[edges[i].src] == c[edges[i].dst])
{
atomicAdd(changes + edges[i].src, edges[i].weight);
}
}
for (int i = tid; i < N; i += num_threads)
{
ac_helper[i] = ac[i] * ac[i];
}
}
__host__ float compute_modularity(int N, int E, Edge* edges, int* c, float* changes, float* ac, float* ac_helper, float m)
{
CUDA_CHECK(hipMemset(changes, 0, N * sizeof(float)));
hipLaunchKernelGGL(( compute_modularity_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, E, edges, c, changes, ac, ac_helper);
float q1 = thrust::reduce(thrust::device, changes, changes + N);
float q2 = thrust::reduce(thrust::device, ac_helper, ac_helper + N);
return q1 / (2 * m) - q2 / (4 * m * m);
}
__global__ void prepare_reorder_kernel(int N, int* reorder, int* c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < N; i += num_threads)
reorder[c[i]] = 1;
}
__global__ void aggregate_kernel(int E, int orig_N, Edge* edges, int* reorder, int* c, int* final_communities)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < E; i += num_threads)
{
edges[i].src = reorder[c[edges[i].src]];
edges[i].dst = reorder[c[edges[i].dst]];
}
for (int i = tid; i < orig_N; i += num_threads)
{
final_communities[i] = reorder[c[final_communities[i]]];
}
}
__host__ void aggregate(int& N, int E, int orig_N, Edge* edges, int* c, int* final_communities, float* k, int* nodes_comm, int* new_nodes_comm, float* ac)
{
int* reorder;
CUDA_CHECK(hipMalloc((void**)&reorder, (N + 1) * sizeof(int)));
CUDA_CHECK(hipMemset(reorder, 0, (N + 1) * sizeof(int)));
hipLaunchKernelGGL(( prepare_reorder_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, N, reorder, c);
thrust::exclusive_scan(thrust::device, reorder, reorder + N + 1, reorder);
hipLaunchKernelGGL(( aggregate_kernel), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, E, orig_N, edges, reorder, c, final_communities);
N = device_fetch_var(reorder + N);
CUDA_CHECK(hipFree(reorder));
prepare_data_structures(N, E, edges, k, nodes_comm, new_nodes_comm, c, ac);
}
__global__ void prepare_final_communities(int* fc, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < N; i += num_threads)
{
fc[i] = i;
}
}
void gpu_louvain(int N_, Edge* edges_, int E_, float min_gain, bool verbose, std::map<int, int>& reorder)
{
int N;
int orig_N;
int E;
Edge* edges;
Edge* orig_edges;
float m = 0;
int* final_communities;
int* c;
int* new_c;
float* k;
float* ac;
float* ac_helper;
float* changes;
uint64_t* owner;
int* nodes_comm;
int* new_nodes_comm;
Magic* magic;
N = N_;
E = E_;
orig_N = N_;
orig_edges = edges_;
while (E * 10 < ARRAY_SIZE)
{
ARRAY_SIZE >>= 1;
}
hipEvent_t start, algo_start, algo_end, end;
CUDA_CHECK(hipEventCreate(&start));
CUDA_CHECK(hipEventCreate(&algo_start));
CUDA_CHECK(hipEventCreate(&algo_end));
CUDA_CHECK(hipEventCreate(&end));
CUDA_CHECK(hipEventRecord(start, 0));
CUDA_CHECK(hipMalloc((void**)&final_communities, N * sizeof(int)));
CUDA_CHECK(hipMalloc((void**)&c, N * sizeof(int)));
CUDA_CHECK(hipMalloc((void**)&new_c, N * sizeof(int)));
CUDA_CHECK(hipMalloc((void**)&nodes_comm, N * sizeof(int)));
CUDA_CHECK(hipMalloc((void**)&new_nodes_comm, N * sizeof(int)));
CUDA_CHECK(hipMalloc((void**)&k, N * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&ac, N * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&ac_helper, N * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&edges, sizeof(Edge) * E));
CUDA_CHECK(hipMalloc((void**)&changes, ARRAY_SIZE * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&owner, ARRAY_SIZE * sizeof(uint64_t)));
CUDA_CHECK(hipMalloc((void**)&magic, N * sizeof(Magic)));
CUDA_CHECK(hipMemcpy(edges, orig_edges, sizeof(Edge) * E, hipMemcpyHostToDevice));
CUDA_CHECK(hipEventRecord(algo_start, 0));
hipLaunchKernelGGL(( prepare_final_communities), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, final_communities, N);
prepare_data_structures(N, E, edges, k, nodes_comm, new_nodes_comm, c, ac);
// we can compute it on the CPU because it's done only once
for (int i = 0; i < E; ++i)
{
m += orig_edges[i].weight;
}
m /= 2;
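// m is half of the total edge-list weight, assuming each undirected edge
// appears in both directions in the edge list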
float old_modularity = compute_modularity(N, E, edges, c, changes, ac, ac_helper, m), new_modularity = 0, sum = 0;
do
{
sum = 0;
do
{
modularity_optimisation(N, E, edges, c, k, new_c, nodes_comm, new_nodes_comm, ac, m, changes, owner, magic);
new_modularity = compute_modularity(N, E, edges, new_c, changes, ac, ac_helper, m);
if (new_modularity - old_modularity > EPS)
{
std::swap(c, new_c);
sum += new_modularity - old_modularity;
std::swap(new_modularity, old_modularity);
CUDA_CHECK(hipMemcpy(nodes_comm, new_nodes_comm, N * sizeof(int), hipMemcpyDeviceToDevice));
}
else break;
} while (true);
aggregate(N, E, orig_N, edges, c, final_communities, k, nodes_comm, new_nodes_comm, ac);
} while (sum > min_gain);
CUDA_CHECK(hipEventRecord(algo_end, 0));
int* final_communities_host = (int*)malloc(orig_N * sizeof(int));
CUDA_CHECK(hipMemcpy(final_communities_host, final_communities, orig_N * sizeof(int), hipMemcpyDeviceToHost));
float* k_host = (float*)malloc(orig_N * sizeof(float));
memset(k_host, '\0', orig_N * sizeof(float));
for (int i = 0; i < E; ++i)
{
k_host[orig_edges[i].dst] += orig_edges[i].weight;
}
float* ac_host = (float*)malloc(orig_N * sizeof(float));
memset(ac_host, '\0', orig_N * sizeof(float));
CUDA_CHECK(hipDeviceSynchronize());
for (int i = 0; i < orig_N; ++i)
{
ac_host[final_communities_host[i]] += k_host[i];
}
float* e_host = (float*)malloc(orig_N * sizeof(float));
memset(e_host, '\0', orig_N * sizeof(float));
for (int i = 0; i < E; ++i)
{
if (final_communities_host[orig_edges[i].src] == final_communities_host[orig_edges[i].dst])
{
e_host[orig_edges[i].src] += orig_edges[i].weight;
}
}
float q = 0;
for (int i = 0; i < orig_N; ++i)
{
q += e_host[i] / (2 * m);
}
for (int i = 0; i < orig_N; ++i)
{
q -= ac_host[i] * ac_host[i] / (4 * m * m);
}
CUDA_CHECK(hipEventRecord(end, 0));
CUDA_CHECK(hipEventSynchronize(end));
float elapsed, elapsed_full;
CUDA_CHECK(hipEventElapsedTime(&elapsed_full, start, end));
CUDA_CHECK(hipEventElapsedTime(&elapsed, algo_start, algo_end));
printf("%f\n", q);
printf("%f %f\n", elapsed, elapsed_full);
if (verbose)
{
printf("%d\n", N);
for (int i = 0; i < N; ++i)
{
printf("%d ", i + 1);
for (int v = 0; v < orig_N; ++v)
{
if (final_communities_host[v] == i)
printf("%d ", reorder[v]);
}
printf("\n");
}
}
free(final_communities_host);
free(k_host);
free(ac_host);
free(e_host);
CUDA_CHECK(hipFree(final_communities));
CUDA_CHECK(hipFree(c));
CUDA_CHECK(hipFree(new_c));
CUDA_CHECK(hipFree(k));
CUDA_CHECK(hipFree(ac));
CUDA_CHECK(hipFree(ac_helper));
CUDA_CHECK(hipFree(changes));
CUDA_CHECK(hipFree(owner));
CUDA_CHECK(hipFree(nodes_comm));
CUDA_CHECK(hipFree(new_nodes_comm));
CUDA_CHECK(hipFree(edges));
CUDA_CHECK(hipFree(magic));
}
|
a6499c6f06065c8038c108edc6355bc4edbd9221.cu
|
#include "gpu_louvain.h"
#include <algorithm>
#include <stdint.h>
#include <string.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include "utils.h"
#define BLOCKS 80
#define THREADS_PER_BLOCK 1024
#define EPS (1e-12)
uint32_t ARRAY_SIZE = 1LL << 28;
__global__ void
prepare_data_structures_kernel(int N, int E, Edge* edges, int* c, float* k, int* nodes_comm, int* new_nodes_comm)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < N; i += num_threads)
{
c[i] = i;
}
for (int i = tid; i < E; i += num_threads)
{
atomicAdd(&k[edges[i].dst], edges[i].weight);
}
for (int i = tid; i < N; i += num_threads)
{
nodes_comm[i] = 1;
}
for (int i = tid; i < N; i += num_threads)
{
new_nodes_comm[i] = 1;
}
}
__host__ void prepare_data_structures(int N, int E, Edge* edges, float* k, int* nodes_comm, int* new_nodes_comm, int* c, float* ac)
{
CUDA_CHECK(cudaMemset(k, '\0', N * sizeof(float)));
prepare_data_structures_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, E, edges, c, k, nodes_comm, new_nodes_comm);
CUDA_CHECK(cudaMemcpy(ac, k, N * sizeof(float), cudaMemcpyDeviceToDevice));
}
__device__ uint32_t arr_hash(uint64_t key, int seed, uint64_t N, uint32_t SIZE)
{
uint64_t l = N * N * seed + key - 1;
l = (l >> 32) * 1605375019ULL + (l & 0xffffffffULL) * 553437317ULL + 3471094223ULL;
l = (l >> 32) * 2769702083ULL + (l & 0xffffffffULL) * 3924398899ULL + 2998053229ULL;
return l & (SIZE - 1);
}
__device__ uint32_t getpos(uint64_t* owner, uint64_t key, int N, uint32_t SIZE)
{
for (int it = 0; ; ++it)
{
uint32_t pos = arr_hash(key, it, N, SIZE);
if (owner[pos] == key)
{
return pos;
}
else if (owner[pos] == 0)
{
if (atomicCAS((unsigned long long*)&owner[pos], (unsigned long long)(0), (unsigned long long)key) == 0)
{
return pos;
}
else if (owner[pos] == key)
{
return pos;
}
}
}
}
__global__ void compute_changes_kernel(int N, int E, float* changes, uint64_t* owner, Edge* edges, int* c, uint32_t SIZE)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int e = tid; e < E; e += num_threads)
{
int vertex = edges[e].src;
if (edges[e].dst != vertex)
{
uint64_t key = (uint64_t)N * vertex + c[edges[e].dst] + 1;
uint32_t pos = getpos(owner, key, N, SIZE);
atomicAdd(&changes[pos], edges[e].weight);
}
}
}
union Magic
{
unsigned long long encoded;
struct {
int comm;
float change;
} decoded;
};
static_assert(sizeof(Magic) == 8, "too much magic");
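// Magic packs a candidate community and the corresponding modularity gain
// into a single 64-bit word so the best move per vertex can be updated with
// one atomicCAS.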
__global__ void prepare_magic_kernel(int N, Magic* magic, int* c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = tid; v < N; v += num_threads)
{
magic[v].decoded.comm = c[v];
magic[v].decoded.change = 0;
}
}
__global__ void modularity_optimisation_kernel(int N, int E, Edge* edges, int* c, float* k, int* nodes_comm, int* new_nodes_comm, float* ac, float m, float* changes, uint64_t* owner, Magic* magic, uint32_t SIZE)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int e = tid; e < E; e += num_threads)
{
int i = c[edges[e].dst];
int vertex = edges[e].src;
if (nodes_comm[c[vertex]] <= 1 && nodes_comm[i] <= 1 && i >= c[vertex])
{
continue;
}
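// Modularity gain of moving `vertex` into community i: (edges to i minus
// edges to its current community) / m plus the degree term
// k_v * ((a_old - k_v) - a_new) / (2 m^2).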
uint32_t pos1 = getpos(owner, (uint64_t)N * vertex + i + 1, N, SIZE);
uint32_t pos2 = getpos(owner, (uint64_t)N * vertex + c[vertex] + 1, N, SIZE);
float change = (changes[pos1] - changes[pos2]) / m + k[vertex] * ((ac[c[vertex]] - k[vertex]) - ac[i]) / (2 * m * m);
if (change < EPS)
{
continue;
}
Magic new_magic;
new_magic.decoded.comm = i;
new_magic.decoded.change = change;
while (true)
{
Magic local_magic = magic[vertex];
if ((change > local_magic.decoded.change ||
(fabs(change - local_magic.decoded.change) < EPS && i < local_magic.decoded.comm)))
{
if (atomicCAS((unsigned long long*)(magic + vertex),
local_magic.encoded, new_magic.encoded)
== local_magic.encoded)
{
atomicAdd(new_nodes_comm + i, 1);
atomicAdd(new_nodes_comm + local_magic.decoded.comm, -1);
break;
}
}
else break;
}
}
}
__global__ void update_ac_kernel(int N, float* ac, int* c, float* k)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = tid; v < N; v += num_threads)
{
atomicAdd(&ac[c[v]], k[v]);
}
}
__global__ void compute_new_c_kernel(int N, Magic* magic, int* new_c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int v = tid; v < N; v += num_threads)
{
new_c[v] = magic[v].decoded.comm;
}
}
__host__ void modularity_optimisation(int N, int E, Edge* edges, int* c, float* k, int* new_c, int* nodes_comm, int* new_nodes_comm, float* ac, float m, float* changes, uint64_t* owner, Magic* magic)
{
CUDA_CHECK(cudaMemset(owner, '\0', sizeof(uint64_t) * ARRAY_SIZE)); // owner stores 64-bit keys, so clear the full buffer
CUDA_CHECK(cudaMemset(changes, '\0', sizeof(float) * ARRAY_SIZE));
compute_changes_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, E, changes, owner, edges, c, ARRAY_SIZE);
prepare_magic_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, magic, c);
modularity_optimisation_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, E, edges, c, k, nodes_comm, new_nodes_comm, ac, m, changes, owner, magic, ARRAY_SIZE);
compute_new_c_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, magic, new_c);
CUDA_CHECK(cudaMemset(ac, '\0', N * sizeof(float)));
update_ac_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, ac, new_c, k);
}
__global__ void compute_modularity_kernel(int N, int E, Edge* edges, int* c, float* changes, float* ac, float* ac_helper)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < E; i+= num_threads)
{
if (c[edges[i].src] == c[edges[i].dst])
{
atomicAdd(changes + edges[i].src, edges[i].weight);
}
}
for (int i = tid; i < N; i += num_threads)
{
ac_helper[i] = ac[i] * ac[i];
}
}
__host__ float compute_modularity(int N, int E, Edge* edges, int* c, float* changes, float* ac, float* ac_helper, float m)
{
CUDA_CHECK(cudaMemset(changes, 0, N * sizeof(float)));
compute_modularity_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, E, edges, c, changes, ac, ac_helper);
float q1 = thrust::reduce(thrust::device, changes, changes + N);
float q2 = thrust::reduce(thrust::device, ac_helper, ac_helper + N);
return q1 / (2 * m) - q2 / (4 * m * m);
}
__global__ void prepare_reorder_kernel(int N, int* reorder, int* c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < N; i += num_threads)
reorder[c[i]] = 1;
}
__global__ void aggregate_kernel(int E, int orig_N, Edge* edges, int* reorder, int* c, int* final_communities)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < E; i += num_threads)
{
edges[i].src = reorder[c[edges[i].src]];
edges[i].dst = reorder[c[edges[i].dst]];
}
for (int i = tid; i < orig_N; i += num_threads)
{
final_communities[i] = reorder[c[final_communities[i]]];
}
}
__host__ void aggregate(int& N, int E, int orig_N, Edge* edges, int* c, int* final_communities, float* k, int* nodes_comm, int* new_nodes_comm, float* ac)
{
int* reorder;
CUDA_CHECK(cudaMalloc((void**)&reorder, (N + 1) * sizeof(int)));
CUDA_CHECK(cudaMemset(reorder, 0, (N + 1) * sizeof(int)));
prepare_reorder_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(N, reorder, c);
thrust::exclusive_scan(thrust::device, reorder, reorder + N + 1, reorder);
aggregate_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(E, orig_N, edges, reorder, c, final_communities);
N = device_fetch_var(reorder + N);
CUDA_CHECK(cudaFree(reorder));
prepare_data_structures(N, E, edges, k, nodes_comm, new_nodes_comm, c, ac);
}
__global__ void prepare_final_communities(int* fc, int N)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int num_threads = blockDim.x * gridDim.x;
for (int i = tid; i < N; i += num_threads)
{
fc[i] = i;
}
}
void gpu_louvain(int N_, Edge* edges_, int E_, float min_gain, bool verbose, std::map<int, int>& reorder)
{
int N;
int orig_N;
int E;
Edge* edges;
Edge* orig_edges;
float m = 0;
int* final_communities;
int* c;
int* new_c;
float* k;
float* ac;
float* ac_helper;
float* changes;
uint64_t* owner;
int* nodes_comm;
int* new_nodes_comm;
Magic* magic;
N = N_;
E = E_;
orig_N = N_;
orig_edges = edges_;
while (E * 10 < ARRAY_SIZE)
{
ARRAY_SIZE >>= 1;
}
cudaEvent_t start, algo_start, algo_end, end;
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&algo_start));
CUDA_CHECK(cudaEventCreate(&algo_end));
CUDA_CHECK(cudaEventCreate(&end));
CUDA_CHECK(cudaEventRecord(start, 0));
CUDA_CHECK(cudaMalloc((void**)&final_communities, N * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**)&c, N * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**)&new_c, N * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**)&nodes_comm, N * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**)&new_nodes_comm, N * sizeof(int)));
CUDA_CHECK(cudaMalloc((void**)&k, N * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&ac, N * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&ac_helper, N * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&edges, sizeof(Edge) * E));
CUDA_CHECK(cudaMalloc((void**)&changes, ARRAY_SIZE * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&owner, ARRAY_SIZE * sizeof(uint64_t)));
CUDA_CHECK(cudaMalloc((void**)&magic, N * sizeof(Magic)));
CUDA_CHECK(cudaMemcpy(edges, orig_edges, sizeof(Edge) * E, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaEventRecord(algo_start, 0));
prepare_final_communities<<<BLOCKS, THREADS_PER_BLOCK>>>(final_communities, N);
prepare_data_structures(N, E, edges, k, nodes_comm, new_nodes_comm, c, ac);
// we can compute it on the CPU because it's done only once
for (int i = 0; i < E; ++i)
{
m += orig_edges[i].weight;
}
m /= 2;
float old_modularity = compute_modularity(N, E, edges, c, changes, ac, ac_helper, m), new_modularity = 0, sum = 0;
do
{
sum = 0;
do
{
modularity_optimisation(N, E, edges, c, k, new_c, nodes_comm, new_nodes_comm, ac, m, changes, owner, magic);
new_modularity = compute_modularity(N, E, edges, new_c, changes, ac, ac_helper, m);
if (new_modularity - old_modularity > EPS)
{
std::swap(c, new_c);
sum += new_modularity - old_modularity;
std::swap(new_modularity, old_modularity);
CUDA_CHECK(cudaMemcpy(nodes_comm, new_nodes_comm, N * sizeof(int), cudaMemcpyDeviceToDevice));
}
else break;
} while (true);
aggregate(N, E, orig_N, edges, c, final_communities, k, nodes_comm, new_nodes_comm, ac);
} while (sum > min_gain);
CUDA_CHECK(cudaEventRecord(algo_end, 0));
int* final_communities_host = (int*)malloc(orig_N * sizeof(int));
CUDA_CHECK(cudaMemcpy(final_communities_host, final_communities, orig_N * sizeof(int), cudaMemcpyDeviceToHost));
float* k_host = (float*)malloc(orig_N * sizeof(float));
memset(k_host, '\0', orig_N * sizeof(float));
for (int i = 0; i < E; ++i)
{
k_host[orig_edges[i].dst] += orig_edges[i].weight;
}
float* ac_host = (float*)malloc(orig_N * sizeof(float));
memset(ac_host, '\0', orig_N * sizeof(float));
CUDA_CHECK(cudaDeviceSynchronize());
for (int i = 0; i < orig_N; ++i)
{
ac_host[final_communities_host[i]] += k_host[i];
}
float* e_host = (float*)malloc(orig_N * sizeof(float));
memset(e_host, '\0', orig_N * sizeof(float));
for (int i = 0; i < E; ++i)
{
if (final_communities_host[orig_edges[i].src] == final_communities_host[orig_edges[i].dst])
{
e_host[orig_edges[i].src] += orig_edges[i].weight;
}
}
float q = 0;
for (int i = 0; i < orig_N; ++i)
{
q += e_host[i] / (2 * m);
}
for (int i = 0; i < orig_N; ++i)
{
q -= ac_host[i] * ac_host[i] / (4 * m * m);
}
CUDA_CHECK(cudaEventRecord(end, 0));
CUDA_CHECK(cudaEventSynchronize(end));
float elapsed, elapsed_full;
CUDA_CHECK(cudaEventElapsedTime(&elapsed_full, start, end));
CUDA_CHECK(cudaEventElapsedTime(&elapsed, algo_start, algo_end));
printf("%f\n", q);
printf("%f %f\n", elapsed, elapsed_full);
if (verbose)
{
printf("%d\n", N);
for (int i = 0; i < N; ++i)
{
printf("%d ", i + 1);
for (int v = 0; v < orig_N; ++v)
{
if (final_communities_host[v] == i)
printf("%d ", reorder[v]);
}
printf("\n");
}
}
free(final_communities_host);
free(k_host);
free(ac_host);
free(e_host);
CUDA_CHECK(cudaFree(final_communities));
CUDA_CHECK(cudaFree(c));
CUDA_CHECK(cudaFree(new_c));
CUDA_CHECK(cudaFree(k));
CUDA_CHECK(cudaFree(ac));
CUDA_CHECK(cudaFree(ac_helper));
CUDA_CHECK(cudaFree(changes));
CUDA_CHECK(cudaFree(owner));
CUDA_CHECK(cudaFree(nodes_comm));
CUDA_CHECK(cudaFree(new_nodes_comm));
CUDA_CHECK(cudaFree(edges));
CUDA_CHECK(cudaFree(magic));
}
|
16c1185e938a8f38173e50f9b1e4e3307b28b2d0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
extern "C" void set_dev_mem_d(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
int size_t3;
size_t3 = h1d*h2d*h3d*p4d*p5d*p6d;
t3_d = (double *) getGpuMem(size_t3*sizeof(double));
hipMemset(t3_d,0,size_t3*sizeof(double));
}
extern "C" void
dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
set_dev_mem_d((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d);
}
extern "C" void
dev_release()
{
freeGpuMem(t3_d);
freeGpuMem(t3_s_d);
}
extern "C" void
dev_release_()
{
dev_release();
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
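// Tiling sketch for the following kernels: each 16x16 thread block computes a
// 4x4 register tile per thread (a 64x64 output tile), while the contracted
// index h7 is streamed through the t2sub/v2sub shared-memory buffers in
// chunks of Tcomm elements.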
__global__ void sd_t_d1_1_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p6ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;
for(h7T=0;h7T<h7d;h7T+=Tcomm){
int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p6_0*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p6_1*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p6_2*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p6_3*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*p6d*p5d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_1_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p6ld_triplesx=h1d*h3d;
p5ld_triplesx=p6d*h1d*h3d;
p4ld_triplesx=p5d*p6d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
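// Same 16x16-thread / 4x4-register tiling as sd_t_d1_1_kernel, with the p6
// dimension folded into h2 by the host wrapper (h2d = h2d * p6d).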
__global__ void sd_t_d1_2_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h2_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h2_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h2_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h2_3=rest_x;
int t2sub_d_off, v2sub_d_off;
for(h7T=0;h7T<h7d;h7T+=Tcomm){
int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_2_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h2d=h2d*p6d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*h2d*p5d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_2_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
h7ld_v2sub=h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p4ld_triplesx=p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*1;
int total_y = p4d*p5d*h1d;
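/* total_x/total_y are the flattened extents of the output indices mapped to the block
 * x and y dimensions; each thread block covers a (4*T2) x (4*T1) tile of that space. */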
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_2_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
*----------------------------------------------------------------------*/
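/* Illustrative reference only -- not part of the original generated code. A naive
 * host-side version of the contraction above, useful for checking sd_t_d1_3_cuda on
 * small problem sizes. The helper name and its flat-array indexing are assumptions:
 * they follow the leading dimensions set up in the wrapper below (h7 fastest in t2sub,
 * h3 fastest in v2sub, h1 fastest in triplesx), with h3 standing for the already
 * flattened h3*h2*p6 extent. */
static void sd_t_d1_3_reference(int h1d, int h3d, int h7d, int p4d, int p5d,
                                const double *t2sub, const double *v2sub,
                                double *triplesx) {
  for (int p4 = 0; p4 < p4d; ++p4)
    for (int p5 = 0; p5 < p5d; ++p5)
      for (int h3 = 0; h3 < h3d; ++h3)
        for (int h1 = 0; h1 < h1d; ++h1) {
          double sum = 0.0;
          for (int h7 = 0; h7 < h7d; ++h7)
            sum += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
                 * v2sub[h3 + h3d*h7];
          triplesx[h1 + h1d*(h3 + h3d*(p5 + p5d*p4))] -= sum; /* triplesx[h1,h3,p5,p4] */
        }
}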
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_3_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
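/* Tiling scheme used by this and the other sd_t_d1_* kernels below: the contraction
 * index h7 is walked in chunks of Tcomm, each chunk of t2sub and v2sub is staged into
 * the shared-memory tiles declared above, and every thread accumulates a 4x4 register
 * block of output elements (tlocal1..tlocal16) before the final guarded write-back. */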
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h3_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_3_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
h3d=h3d*p6d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h1d*h3d*p5d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_3_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h7ld_v2sub=h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p4ld_triplesx=p5d*h3d*h1d;
int total_x = h3d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_3_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
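/* Illustrative reference only -- not part of the original generated code. A naive
 * host-side version of the contraction above for validating sd_t_d1_4_cuda. The helper
 * name is hypothetical; the flat indexing mirrors the leading dimensions set up in the
 * wrapper below (h3 here is the already flattened h3*h2 extent). */
static void sd_t_d1_4_reference(int h1d, int h3d, int h7d, int p4d, int p5d, int p6d,
                                const double *t2sub, const double *v2sub,
                                double *triplesx) {
  for (int p6 = 0; p6 < p6d; ++p6)
    for (int p4 = 0; p4 < p4d; ++p4)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h1 = 0; h1 < h1d; ++h1)
          for (int h3 = 0; h3 < h3d; ++h3) {
            double sum = 0.0;
            for (int h7 = 0; h7 < h7d; ++h7)
              sum += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
                   * v2sub[h3 + h3d*(p6 + p6d*h7)];
            triplesx[h3 + h3d*(h1 + h1d*(p5 + p5d*(p4 + p4d*p6)))] -= sum; /* [h3,h1,p5,p4,p6] */
          }
}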
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_4_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_4_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*p5d*p4d*p6d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_4_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p5ld_triplesx=h1d*h3d;
p4ld_triplesx=p5d*h1d*h3d;
p6ld_triplesx=p4d*p5d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_4_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
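/* Illustrative reference only -- not part of the original generated code; the helper
 * name is hypothetical. Naive host-side version of the contraction above, indexed with
 * the leading dimensions set up in sd_t_d1_5_cuda below. */
static void sd_t_d1_5_reference(int h1d, int h2d, int h3d, int h7d,
                                int p4d, int p5d, int p6d,
                                const double *t2sub, const double *v2sub,
                                double *triplesx) {
  for (int p6 = 0; p6 < p6d; ++p6)
    for (int p4 = 0; p4 < p4d; ++p4)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h2 = 0; h2 < h2d; ++h2)
          for (int h1 = 0; h1 < h1d; ++h1)
            for (int h3 = 0; h3 < h3d; ++h3) {
              double sum = 0.0;
              for (int h7 = 0; h7 < h7d; ++h7)
                sum += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
                     * v2sub[h3 + h3d*(h2 + h2d*(p6 + p6d*h7))];
              triplesx[h3 + h3d*(h1 + h1d*(h2 + h2d*(p5 + p5d*(p4 + p4d*p6))))] += sum;
            }
}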
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_5_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_5_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*h2d*p5d*p4d*p6d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_5_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
p6ld_v2sub=h2d*h3d;
h7ld_v2sub=p6d*h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p4ld_triplesx=p5d*h2d*h1d*h3d;
p6ld_triplesx=p4d*p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
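/* Illustrative reference only -- not part of the original generated code; the helper
 * name is hypothetical. Same contraction as sd_t_d1_4 but with h1 fastest in triplesx,
 * matching the leading dimensions set up in sd_t_d1_6_cuda below (h3 is the flattened
 * h3*h2 extent). */
static void sd_t_d1_6_reference(int h1d, int h3d, int h7d, int p4d, int p5d, int p6d,
                                const double *t2sub, const double *v2sub,
                                double *triplesx) {
  for (int p6 = 0; p6 < p6d; ++p6)
    for (int p4 = 0; p4 < p4d; ++p4)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h3 = 0; h3 < h3d; ++h3)
          for (int h1 = 0; h1 < h1d; ++h1) {
            double sum = 0.0;
            for (int h7 = 0; h7 < h7d; ++h7)
              sum += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
                   * v2sub[h3 + h3d*(p6 + p6d*h7)];
            triplesx[h1 + h1d*(h3 + h3d*(p5 + p5d*(p4 + p4d*p6)))] -= sum; /* [h1,h3,p5,p4,p6] */
          }
}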
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_6_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_6_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h1d*h3d*p5d*p4d*p6d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_6_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p4ld_triplesx=p5d*h3d*h1d;
p6ld_triplesx=p4d*p5d*h3d*h1d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
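/* Illustrative reference only -- not part of the original generated code; the helper
 * name is hypothetical. Naive host-side version of the contraction above, assuming the
 * same layout convention as the other d1 wrappers (h7 fastest in t2sub, h3 fastest in
 * v2sub and triplesx, with the output ordered h3,h1,p5,p6,p4). */
static void sd_t_d1_7_reference(int h1d, int h3d, int h7d, int p4d, int p5d, int p6d,
                                const double *t2sub, const double *v2sub,
                                double *triplesx) {
  for (int p4 = 0; p4 < p4d; ++p4)
    for (int p6 = 0; p6 < p6d; ++p6)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h1 = 0; h1 < h1d; ++h1)
          for (int h3 = 0; h3 < h3d; ++h3) {
            double sum = 0.0;
            for (int h7 = 0; h7 < h7d; ++h7)
              sum += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
                   * v2sub[h3 + h3d*(p6 + p6d*h7)];
            triplesx[h3 + h3d*(h1 + h1d*(p5 + p5d*(p6 + p6d*p4)))] += sum; /* [h3,h1,p5,p6,p4] */
          }
}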
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_7_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
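/* Host wrapper for the d1_7 contraction. The h2 extent is folded into h3
 (h3d *= h2d) so the kernel sees one fused hole index; t2sub and v2sub are
 copied to the device, while the output block is assumed to be resident in
 the global device pointer t3_d and is accumulated in place, so the triplesx
 host argument is not transferred here. */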
extern "C" void sd_t_d1_7_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_7_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p5ld_triplesx=h1d*h3d;
p6ld_triplesx=p5d*h1d*h3d;
p4ld_triplesx=p6d*p5d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
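/* Reference-only sketch (kept out of the build with #if 0): a plain host loop
 nest that applies the same update as sd_t_d1_8 below, convenient for checking
 the GPU result on small extents. The function name is illustrative and not an
 existing entry point; array layouts follow the leading dimensions set in
 sd_t_d1_8_cuda (fastest index first in each bracket list). */
#if 0
static void sd_t_d1_8_reference(int h1d, int h2d, int h3d, int h7d,
 int p4d, int p5d, int p6d,
 double *triplesx, const double *t2sub, const double *v2sub)
{
 for (int p4 = 0; p4 < p4d; ++p4)
 for (int p6 = 0; p6 < p6d; ++p6)
 for (int p5 = 0; p5 < p5d; ++p5)
 for (int h2 = 0; h2 < h2d; ++h2)
 for (int h1 = 0; h1 < h1d; ++h1)
 for (int h3 = 0; h3 < h3d; ++h3) {
 double acc = 0.0;
 /* contract over h7: t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] */
 for (int h7 = 0; h7 < h7d; ++h7)
 acc += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
 * v2sub[h3 + h3d*(h2 + h2d*(p6 + p6d*h7))];
 /* triplesx[h3,h1,h2,p5,p6,p4] -= sum_h7 ... */
 triplesx[h3 + h3d*(h1 + h1d*(h2 + h2d*(p5 + p5d*(p6 + p6d*p4))))] -= acc;
 }
}
#endif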
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_8_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
/* Shared-memory staging: each thread block loads a Tcomm-wide slab of the
 contraction index h7 for 4*T1 rows of t2sub and 4*T2 columns of v2sub,
 then every thread accumulates a 4x4 register tile from these buffers. */
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;
for(h7T=0;h7T<h7d;h7T+=Tcomm){
int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
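/* Host wrapper for the d1_8 contraction: stages t2sub and v2sub on the device
 and launches the kernel above; the output block is assumed to be resident in
 the global device pointer t3_d, so the triplesx host argument is not copied. */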
extern "C" void sd_t_d1_8_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*h2d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_8_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
p6ld_v2sub=h2d*h3d;
h7ld_v2sub=p6d*h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p6ld_triplesx=p5d*h2d*h1d*h3d;
p4ld_triplesx=p6d*p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
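/* Illustrative note: all of these kernels address their arrays through the
 precomputed leading dimensions passed as the *ld_* arguments. For the d1_9
 output triplesx[h1,h3,p5,p6,p4] the wrapper below sets h1ld=1, h3ld=h1d,
 p5ld=h3d*h1d, p6ld=p5d*h3d*h1d and p4ld=p6d*p5d*h3d*h1d, i.e. the usual
 column-major flattening sketched by this reference-only helper (the helper
 name is illustrative and not used elsewhere in this file). */
#if 0
static inline size_t d1_9_triplesx_offset(int h1, int h3, int p5, int p6, int p4,
 int h1d, int h3d, int p5d, int p6d)
{
 /* h1 varies fastest, p4 slowest */
 return (size_t)h1
 + (size_t)h1d * ((size_t)h3
 + (size_t)h3d * ((size_t)p5
 + (size_t)p5d * ((size_t)p6
 + (size_t)p6d * (size_t)p4)));
}
#endif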
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_9_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;
for(h7T=0;h7T<h7d;h7T+=Tcomm){
int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
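/* Host wrapper for the d1_9 contraction. As in d1_7, the h2 extent is folded
 into h3 (h3d *= h2d) before the launch; t2sub and v2sub are staged on the
 device and the result is accumulated into the resident t3_d block. */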
extern "C" void sd_t_d1_9_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
hipStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h1d*h3d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d1_9_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2sub_d,t2sub,size_t2sub,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2sub_d,v2sub,size_v2sub,hipMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p6ld_triplesx=p5d*h3d*h1d;
p4ld_triplesx=p6d*p5d*h3d*h1d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d1_9_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
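/* Illustrative note: every kernel here flattens its output indices into a 2-D
 plane, total_x over the v2-side indices and total_y over the t2-side indices,
 and assigns each thread a 4x4 tile of that plane, which is why the grid below
 is sized with DIV_UB over 4*T2 and 4*T1. A reference-only sketch of that
 sizing for d2_1, assuming DIV_UB is the usual round-up division (p6d is the
 already-fused p6d*p5d used by the wrapper, and the function name is
 illustrative): */
#if 0
static void d2_1_launch_geometry(int h1d, int h2d, int h3d, int p4d, int p6d,
 int *grid_x, int *grid_y)
{
 int total_x = h3d * p6d; /* h3, p6 come from v2[p7,h3,p6] */
 int total_y = p4d * h1d * h2d; /* p4, h1, h2 come from t2[p7,p4,h1,h2] */
 /* T1 = T2 = 16 and each thread covers 4 elements in each direction. */
 *grid_x = (total_x + 4 * 16 - 1) / (4 * 16);
 *grid_y = (total_y + 4 * 16 - 1) / (4 * 16);
}
#endif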
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
/* Shared-memory staging for the d2 family: a Tcomm-wide slab of the
 contraction index p7 is buffered for 4*T1 rows of t2 and 4*T2 columns of
 v2 before the 4x4 register-tile accumulation. */
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
int t2_d_off, v2_d_off;
/* March over the contraction index p7 in slabs of width Tcomm. */
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
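/* Host wrapper for the d2_1 contraction. The p5 extent is folded into p6
 (p6d *= p5d) so the kernel works with one fused particle index; t2 and v2 are
 copied to the device and the update is applied to the resident t3_d block. */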
extern "C" void sd_t_d2_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
p6d=p6d*p5d;
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h3d*h2d*h1d*p6d*p4d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_1_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
int total_x = h3d*p6d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);
}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
*----------------------------------------------------------------------*/
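/* Reference-only sketch (kept out of the build) of the d2_2 update, useful for
 validating small extents. Here h3d is the already-fused h3*p6*p5 extent used
 by the wrapper below, and the function name is illustrative. */
#if 0
static void sd_t_d2_2_reference(int h1d, int h2d, int h3d, int p4d, int p7d,
 double *t3, const double *t2, const double *v2)
{
 for (int p4 = 0; p4 < p4d; ++p4)
 for (int h3 = 0; h3 < h3d; ++h3)
 for (int h1 = 0; h1 < h1d; ++h1)
 for (int h2 = 0; h2 < h2d; ++h2) {
 double acc = 0.0;
 /* contract over p7: t2[p7,p4,h1,h2] * v2[p7,h3] */
 for (int p7 = 0; p7 < p7d; ++p7)
 acc += t2[p7 + p7d*(p4 + p4d*(h1 + h1d*h2))]
 * v2[p7 + p7d*h3];
 /* t3[h2,h1,h3,p4] -= sum_p7 ... */
 t3[h2 + h2d*(h1 + h1d*(h3 + h3d*p4))] -= acc;
 }
}
#endif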
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_2_kernel(int h1d,int h2d,int h3d,int p4d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
h3_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
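/* Host wrapper for the d2_2 contraction. Both p6 and p5 are folded into h3
 (h3d *= p6d*p5d), which reduces the output to the four-index form handled by
 the kernel above; t2 and v2 are copied to the device and t3_d is updated in
 place. */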
extern "C" void sd_t_d2_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
h3d=h3d*p6d;
h3d=h3d*p5d;
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_2_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
int total_x = h3d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_2_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);
}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
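/* Note: this contraction consumes the same t2[p7,p4,h1,h2] and v2[p7,h3,p6]
 inputs as sd_t_d2_1 above; only the placement of h2 and h3 in the output and
 the sign of the accumulation differ. */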
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_3_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
p6d=p6d*p5d;
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p6d*p4d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_3_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
int total_x = h3d*p6d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_3_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
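/* Fortran-callable entry point: the dimensions arrive as Integer* and are
 * dereferenced and narrowed to int before forwarding to the C wrapper above. */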
extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h3d*h2d*h1d*p6d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_4_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
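/* Column-major strides (leading dimensions) for each index of t2[p7,p4,h1,h2],
 * v2[p7,h3,p6,p5] and t3[h3,h2,h1,p6,p4,p5]; the fastest index has stride 1.
 * total_x/total_y are the flattened extents of the v2-side and t2-side external
 * indices, and the grid covers them in (4*T2) x (4*T1) output tiles. */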
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
p5ld_t3=p4d*p6d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_4_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
h3d=h3d*p6d;
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p5d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_5_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p5ld_v2=h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p5ld_t3=p4d*h3d*h1d*h2d;
int total_x = h3d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
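/* sd_t_d2_6 through sd_t_d2_8 follow the same tiled scheme as sd_t_d2_4 but
 * subtract the product (note the -= in the final stores); among themselves they
 * differ only in the ordering of the t3 output indices and hence in the t3
 * strides computed by their host wrappers. */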
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p6d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_6_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
p5ld_t3=p4d*p6d*h1d*h3d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_7_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h3d*h2d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_7_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p4ld_t3=h1d*h2d*h3d;
p6ld_t3=p4d*h1d*h2d*h3d;
p5ld_t3=p6d*p4d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_8_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_8_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p6ld_t3=p4d*h3d*h1d*h2d;
p5ld_t3=p6d*p4d*h3d*h1d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
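/* For validation at small block sizes, the following is a minimal host-side sketch of the
   same contraction computed by sd_t_d2_9_kernel below. It is NOT part of the original
   source: the function name is ours, and the first-index-fastest (Fortran-order) layout is
   inferred from the leading dimensions set up in sd_t_d2_9_cuda. Kept under #if 0 so it
   does not affect the build. */
#if 0
static void sd_t_d2_9_reference(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,
                                double *t3,const double *t2,const double *v2)
{
  /* t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5], first index fastest */
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int p6 = 0; p6 < p6d; ++p6)
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h3 = 0; h3 < h3d; ++h3)
  for (int h2 = 0; h2 < h2d; ++h2) {
    double sum = 0.0;
    for (int p7 = 0; p7 < p7d; ++p7)
      sum += t2[p7 + p7d*(p4 + p4d*(h1 + h1d*h2))]
           * v2[p7 + p7d*(h3 + h3d*(p6 + p6d*p5))];
    t3[h2 + h2d*(h3 + h3d*(h1 + h1d*(p4 + p4d*(p6 + p6d*p5))))] += sum;
  }
}
#endif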
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_9_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
hipStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
hipFuncSetCacheConfig(sd_t_d2_9_kernel, hipFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(hipStream_t*) malloc(nstreams*sizeof(hipStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(hipStreamCreate(&streams[i])) ;
}
CUDA_SAFE(hipMemcpy(t2_d,t2,size_t2,hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d,v2,size_v2,hipMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p4ld_t3=h1d*h3d*h2d;
p6ld_t3=p4d*h1d*h3d*h2d;
p5ld_t3=p6d*p4d*h1d*h3d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_d2_9_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
hipDeviceSynchronize();
for(i=0;i<nstreams;++i){
hipStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
#define MAX_h3 64
/* IMPORTANT!!!!
   t3_d must be passed as a parameter to the kernel: a __global__ function cannot access the
   host-side global variable directly. */
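/* A minimal sketch of the pattern described above (illustrative only, not part of the
   original source; names are invented): the host keeps the device pointer in a file-scope
   variable and hands it to the kernel as an ordinary argument. Kept under #if 0. */
#if 0
static double *buf_d;                              /* host file-scope variable holding a device pointer */
__global__ void scale_kernel(double *buf, int n, double s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) buf[i] *= s;                          /* the kernel sees the pointer only through its argument */
}
static void scale_on_device(int n, double s) {
  hipLaunchKernelGGL((scale_kernel), dim3((n + 255) / 256), dim3(256), 0, 0, buf_d, n, s);
}
#endif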
__global__ void compute_energy_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, int total_size, double* t3d, double* t3_sd)
{
int h1,h2,p6,p4,p5, h3,i=0;
double e1,e2,e4,e5,e6;
// __shared__ double t2_shm[MAX_h3];
__shared__ double energy_s[T1];
__shared__ double energy2_s[T1];
double inner_fac;
int limit;
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
if(threadIdx.x==0)
{
energy[blockIdx.x]=0;
energy[blockIdx.x+gridDim.x]=0;
energy_s[threadIdx.x] = 0.0;
energy2_s[threadIdx.x] = 0.0;
}
for(int j =0; j<T2*T1;j++) {
thread_x = T2*T1*blockIdx.x + j;
rest_x = thread_x;
__syncthreads();
h2=rest_x%h2d;
rest_x=rest_x/h2d;
h1=rest_x%h1d;
rest_x=rest_x/h1d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
rest_x=rest_x/p5d;
p4=rest_x%p4d;
e1 = eval1[h1];
e2 = eval2[h2];
e4 = eval4[p4];
e5 = eval5[p5];
e6 = eval6[p6];
/*
for(p4=0;p4<p4d;p4++)
for(p5 = 0;p5<p5d;p5++)
for(p6=0;p6<p6d;p6++)
for(h1= 0;h1<h1d;h1++)
for(h2=0;h2<h2d;h2++)
for(h3=0;h3<h3d;h3++) {
inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1]
+eval2[h2]+eval3[h3];
energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac;
energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac;
i++;
}
*/
if(thread_x<total_size)
for(int i=0;i<h3d;i++)
{
inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i];
//ckbn avoid e1 in case we need just (T)
energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac;
energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac;
}
__syncthreads();
}
if(threadIdx.x==0)
{
/* limit = blockDim.x;
if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x;
for(int i=0;i<limit;i++)
{
energy[blockIdx.x]+=energy_s[i];
energy[blockIdx.x+gridDim.x]+=energy2_s[i];
}
*/
energy[blockIdx.x] = energy_s[0];
energy[blockIdx.x+gridDim.x] = energy2_s[0];
}
__syncthreads();
}
extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,int h1d, int h2d, int h3d, int p4d, int p5d,int p6d, double* host1, double* host2)
//ckbn en_comment, double* total_d, double* total_s)
{
double* energy_d, *energy_h;
double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6;
int size_energy = 2*sizeof(double);
int total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1));
// int total_block = 1;
int total_elements = h1d*h2d*p4d*p5d*p6d;
energy_d = (double*)getGpuMem(size_energy*total_block*2);
int i=0,in;
double* t3 = (double*)malloc(sizeof(double)*h3d*total_elements);
double* ts3 = (double*)malloc(sizeof(double)*h3d*total_elements);
energy_h = (double*)getHostMem(size_energy*2*total_block);
eval_d1 = (double*)getGpuMem(h1d*sizeof(double));
eval_d2 = (double*)getGpuMem(h2d*sizeof(double));
eval_d3 = (double*)getGpuMem(h3d*sizeof(double));
eval_d4 = (double*)getGpuMem(p4d*sizeof(double));
eval_d5 = (double*)getGpuMem(p5d*sizeof(double));
eval_d6 = (double*)getGpuMem(p6d*sizeof(double));
CUDA_SAFE(hipMemcpy(eval_d1, eval1, h1d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d2, eval2, h2d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d3, eval3, h3d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d4, eval4, p4d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d5, eval5, p5d*sizeof(double), hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(eval_d6, eval6, p6d*sizeof(double), hipMemcpyHostToDevice));
/* for test only */
//printf("host 2 is %f %f\n", host2[0], host2[1]);
// CUDA_SAFE(hipMemcpy(t3_s_d, host2, total_elements*h3d*sizeof(double), hipMemcpyHostToDevice));
dim3 dimBlock(1); //T2*T1);
dim3 dimGrid(total_block);
hipLaunchKernelGGL(( compute_energy_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d);
hipDeviceSynchronize();
//CHECK_ERR("Kernel execution failed");
CUDA_SAFE(hipMemcpy(((char *) energy_h) , ((char *) energy_d) ,
size_energy*total_block*2, hipMemcpyDeviceToHost));
for(int i=1;i<dimGrid.x;i++)
{
energy_h[0]+=energy_h[i];
energy_h[dimGrid.x]+=energy_h[i+dimGrid.x];
}
// printf("CUDA energy_h is %f %f %d %d %d %d %d %d\n", energy_h[0], energy_h[dimGrid.x]); //, total_size, h1d, h2d, p4d, p5d,p6d);
/*
CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_d) , sizeof(double)*h3d*total_elements, hipMemcpyDeviceToHost));
CUDA_SAFE(hipMemcpy(((char *) ts3) , ((char *) t3_s_d) , sizeof(double)*h3d*total_elements, hipMemcpyDeviceToHost));
total_s[0]=0.0, total_d[0]=0.0;
for(int i=0;i<h3d*total_elements;i++) {
total_s[0] += ts3[i];
total_d[0] += t3[i];
}
*/
// printf("Total doubles and singles %f, %f\n", total_d, total_s);
energy[0] = energy_h[0];
energy[1] = energy_h[dimGrid.x];
freeGpuMem(energy_d);
freeGpuMem(eval_d1);
freeGpuMem(eval_d2);
freeGpuMem(eval_d3);
freeGpuMem(eval_d4);
freeGpuMem(eval_d5);
freeGpuMem(eval_d6);
 free(t3);                      /* scratch buffers used only by the commented-out verification above */
 free(ts3);
 freeHostMem(energy_h);
}
extern "C" void
compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* host2)
//ckbn en_comment,double* total_d, double* total_s)
{
compute_energy((double) *factor, energy, eval1,eval2, eval3, eval4, eval5, eval6,(int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, host1, host2);
//ckbn en_comment ,total_d, total_s);
}
//__device__ double* t3_d;
extern "C" void set_dev_mem_s(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
int size_t3;
size_t3 = h1d*h2d*h3d*p4d*p5d*p6d;
t3_s_d = (double *) getGpuMem(size_t3*sizeof(double));
hipMemset(t3_s_d,0,size_t3*sizeof(double));
}
extern "C" void
dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
set_dev_mem_s((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
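/* A hedged host-side sketch of the singles contribution handled by the sd_t_s1_* kernels
   below (not part of the original source). The variants differ only in the permutation of
   t3's indices, encoded through the *ld_t3 strides chosen by the host wrappers, and in the
   sign of the update; the generic form, with strides supplied by the caller, is: */
#if 0
static void sd_t_s1_reference(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,
                              size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,
                              size_t p6ld_t3,size_t p5ld_t3,size_t p4ld_t3,
                              double sign, double *t3,
                              const double *t2 /* [p4,h1], p4 fastest */,
                              const double *v2 /* [h3,h2,p6,p5], h3 fastest */)
{
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int p6 = 0; p6 < p6d; ++p6)
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h2 = 0; h2 < h2d; ++h2)
  for (int h3 = 0; h3 < h3d; ++h3)
    t3[h3*h3ld_t3 + h2*h2ld_t3 + h1*h1ld_t3 + p6*p6ld_t3 + p5*p5ld_t3 + p4*p4ld_t3]
      += sign * t2[p4 + p4d*h1]
              * v2[h3 + h3d*(h2 + h2d*(p6 + p6d*p5))];
}
#endif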
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3, double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
//CUDA_SAFE(hipMalloc((void**) &t3_d, size_t3));
//CUDA_SAFE(hipMalloc((void**) &t2_d, size_t2));
//CUDA_SAFE(hipMalloc((void**) &v2_d, size_v2));
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/*
st = timer();
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
hipDeviceSynchronize();
// CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
// hipFree(t2_d);
// hipFree(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_1_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_1_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_2_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}*/
//CUDA_SAFE(hipMalloc((void**) &t2_d, size_t2));
//CUDA_SAFE(hipMalloc((void**) &v2_d, size_v2));
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
/* assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}*/
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
// for(i=0;i<nstreams;++i){
 i = 0; /* stream loop is disabled in this variant; keep the kernel's (unused) index argument defined */
hipLaunchKernelGGL(( sd_t_s1_2_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
// }
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
// CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
/*
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}*/
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_2_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
extern "C" void
sd_t_s1_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_1_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
 hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_3_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
/* assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}*/
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
i=0;
// for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_4_kernel), dim3(dimGrid),dim3(dimBlock),0, 0, h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
//sd_t_s1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
// }
hipDeviceSynchronize();
/* CUDA_SAFE(hipMemcpy(((char *) t3_p) , ((char *) t3_d) , size_block_t3, hipMemcpyDeviceToHost));
printf("Time for Async DeviceToHost %f\n", et-st);
stream = 0;
// while (stream < nstreams) {
// while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = t3_p; //[stream * size_el_block_t3];
double *dst = t3; //[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] -= src[i];
}
// stream++;
// }
*/
// hipDeviceSynchronize();
/*
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}*/
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_4_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_5_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_5_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_6_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_6_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_7_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_7_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
extern "C" void
sd_t_s1_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_8_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
// CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_8_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
hipStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
hipMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipStreamCreate(&streams[i]));
}
CUDA_SAFE(hipMemcpy(t2_d, t2, size_t2, hipMemcpyHostToDevice));
CUDA_SAFE(hipMemcpy(v2_d, v2, size_v2, hipMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
hipLaunchKernelGGL(( sd_t_s1_7_kernel), dim3(dimGrid),dim3(dimBlock),0,streams[i], h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(hipMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, hipMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (hipStreamQuery(streams[stream]) != hipSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
hipDeviceSynchronize();
//CUDA_SAFE(hipMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, hipMemcpyDeviceToHost));
// printf("out is %lf\n", t3_p[0]);
for (i = 0; i < nstreams; ++i) {
hipStreamDestroy(streams[i]);
}
//freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_9_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
16c1185e938a8f38173e50f9b1e4e3307b28b2d0.cu
#include "header.h"
extern "C" void set_dev_mem_d(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
int size_t3;
size_t3 = h1d*h2d*h3d*p4d*p5d*p6d;
t3_d = (double *) getGpuMem(size_t3*sizeof(double));
cudaMemset(t3_d,0,size_t3*sizeof(double));
}
extern "C" void
dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
set_dev_mem_d((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d);
}
extern "C" void
dev_release()
{
freeGpuMem(t3_d);
freeGpuMem(t3_s_d);
}
extern "C" void
dev_release_()
{
dev_release();
}
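/* Added note (not in the original source): set_dev_mem_d allocates and zeroes t3_d,
 * the device-resident accumulator that the sd_t_d1_* wrappers below pass to their
 * kernels instead of copying results back after every call, while the sd_t_s1_*
 * wrappers above accumulate into t3_s_d, which appears to be allocated by a
 * corresponding setup routine outside this excerpt.  dev_release frees both, so the
 * Fortran side is expected to call dev_mem_d_ before a batch of contractions and
 * dev_release_ afterwards. */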
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
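/* Added reference sketch (not part of the original source): a hypothetical naive
 * host-side version of the contraction above, using the column-major layouts that
 * sd_t_d1_1_cuda sets up below.  Note that the wrapper folds h2 into h3
 * (h3d := h3d*h2d) before computing the leading dimensions, so h3d here is the
 * already-fused extent; the GPU path subtracts into the device accumulator t3_d. */
static void sd_t_d1_1_reference(int h1d, int h3d, int h7d, int p4d, int p5d, int p6d,
                                double *triplesx, const double *t2sub, const double *v2sub)
{
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int p6 = 0; p6 < p6d; ++p6)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h3 = 0; h3 < h3d; ++h3) {
    double s = 0.0;
    for (int h7 = 0; h7 < h7d; ++h7)
      s += t2sub[h7 + (size_t)h7d*(p4 + (size_t)p4d*(p5 + (size_t)p5d*h1))]
         * v2sub[h3 + (size_t)h3d*(p6 + (size_t)p6d*h7)];
    triplesx[h3 + (size_t)h3d*(h1 + (size_t)h1d*(p6 + (size_t)p6d*(p5 + (size_t)p5d*p4)))] -= s;
  }
}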
#define T1 16
#define T2 16
#define Tcomm 16
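/* Added commentary (not in the original source): each thread block of this kernel
 * computes a 4*T2 x 4*T1 tile of the flattened output, where the x dimension
 * (total_x) enumerates the (h3,p6) output indices and the y dimension (total_y)
 * the (h1,p5,p4) indices.  The contracted h7 index is processed in slabs of Tcomm:
 * each slab of t2sub and v2sub is staged into shared memory, every thread keeps a
 * 4x4 block of partial sums (tlocal1..tlocal16) in registers, and the guarded
 * stores at the end subtract the results into triplesx_d (t3_d at the launch site). */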
__global__ void sd_t_d1_1_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p6ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p6_0*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p6_1*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p6_2*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p6_3*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_1_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*p6d*p5d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_1_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p6ld_triplesx=h1d*h3d;
p5ld_triplesx=p6d*h1d*h3d;
p4ld_triplesx=p5d*p6d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
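/* Added note: each block covers 4*T2 columns (x) and 4*T1 rows (y) of the flattened
 * output, hence the grid is DIV_UB(total_x,4*T2) by DIV_UB(total_y,4*T1). */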
for(i=0;i<nstreams;++i){
sd_t_d1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7]
*----------------------------------------------------------------------*/
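/* Added reference sketch (not part of the original source): a hypothetical naive
 * host-side version of the contraction above, using the layouts sd_t_d1_2_cuda
 * sets up below.  The wrapper folds p6 into h2 (h2d := h2d*p6d), so h2d here is
 * the fused extent; note the += accumulation for this term. */
static void sd_t_d1_2_reference(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d,
                                double *triplesx, const double *t2sub, const double *v2sub)
{
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int h2 = 0; h2 < h2d; ++h2)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h3 = 0; h3 < h3d; ++h3) {
    double s = 0.0;
    for (int h7 = 0; h7 < h7d; ++h7)
      s += t2sub[h7 + (size_t)h7d*(p4 + (size_t)p4d*(p5 + (size_t)p5d*h1))]
         * v2sub[h3 + (size_t)h3d*(h2 + (size_t)h2d*h7)];
    triplesx[h3 + (size_t)h3d*(h1 + (size_t)h1d*(h2 + (size_t)h2d*(p5 + (size_t)p5d*p4)))] += s;
  }
}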
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_2_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h2_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h2_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h2_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h2_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_2_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h2d=h2d*p6d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*h2d*p5d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_2_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
h7ld_v2sub=h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p4ld_triplesx=p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*1;
int total_y = p4d*p5d*h1d;
//printf("Blocks %d %d\n", total_x, total_y);
//fflush(stdout);
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_2_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7]
*----------------------------------------------------------------------*/
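/* Added reference sketch (not part of the original source): a hypothetical naive
 * host-side version of the contraction above, using the layouts sd_t_d1_3_cuda
 * sets up below.  The wrapper folds both h2 and p6 into h3 (h3d := h3d*h2d*p6d),
 * so h3d here is the fused extent. */
static void sd_t_d1_3_reference(int h1d, int h3d, int h7d, int p4d, int p5d,
                                double *triplesx, const double *t2sub, const double *v2sub)
{
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int h3 = 0; h3 < h3d; ++h3)
  for (int h1 = 0; h1 < h1d; ++h1) {
    double s = 0.0;
    for (int h7 = 0; h7 < h7d; ++h7)
      s += t2sub[h7 + (size_t)h7d*(p4 + (size_t)p4d*(p5 + (size_t)p5d*h1))]
         * v2sub[h3 + (size_t)h3d*h7];
    triplesx[h1 + (size_t)h1d*(h3 + (size_t)h3d*(p5 + (size_t)p5d*p4))] -= s;
  }
}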
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_3_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
h3_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_3_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
h3d=h3d*p6d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h1d*h3d*p5d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_3_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h7ld_v2sub=h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p4ld_triplesx=p5d*h3d*h1d;
int total_x = h3d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_3_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
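/* Added reference sketch (not part of the original source): a hypothetical naive
 * host-side version of the contraction above, using the layouts sd_t_d1_4_cuda
 * sets up below.  The wrapper folds h2 into h3 (h3d := h3d*h2d), so h3d here is
 * the fused extent. */
static void sd_t_d1_4_reference(int h1d, int h3d, int h7d, int p4d, int p5d, int p6d,
                                double *triplesx, const double *t2sub, const double *v2sub)
{
  for (int p6 = 0; p6 < p6d; ++p6)
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h3 = 0; h3 < h3d; ++h3) {
    double s = 0.0;
    for (int h7 = 0; h7 < h7d; ++h7)
      s += t2sub[h7 + (size_t)h7d*(p4 + (size_t)p4d*(p5 + (size_t)p5d*h1))]
         * v2sub[h3 + (size_t)h3d*(p6 + (size_t)p6d*h7)];
    triplesx[h3 + (size_t)h3d*(h1 + (size_t)h1d*(p5 + (size_t)p5d*(p4 + (size_t)p4d*p6)))] -= s;
  }
}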
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_4_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_4_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d;
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*p5d*p4d*p6d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_4_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p5ld_triplesx=h1d*h3d;
p4ld_triplesx=p5d*h1d*h3d;
p6ld_triplesx=p4d*p5d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
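/* Added note (not in the original source): sd_t_d1_5_kernel below follows the same
 * Tcomm-slab, 4x4 register-tiled scheme as sd_t_d1_1_kernel, with the extra h2
 * output index decoded from the block's x dimension alongside h3 and p6, and it
 * accumulates with += into triplesx_d rather than subtracting. */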
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_5_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_5_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*h2d*p5d*p4d*p6d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_5_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
p6ld_v2sub=h2d*h3d;
h7ld_v2sub=p6d*h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p4ld_triplesx=p5d*h2d*h1d*h3d;
p6ld_triplesx=p4d*p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
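/*----------------------------------------------------------------------*
 * Host-side reference for sd_t_d1_5: an illustrative sketch only, written
 * from the stride setup in sd_t_d1_5_cuda above (first-listed index has
 * unit stride), i.e. triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1]
 * * v2sub[h3,h2,p6,h7].  The *_reference name is not part of the original
 * code; the routine is never called and is kept out of the build, to be
 * enabled locally only if a CPU cross-check of the kernel is wanted.
 *----------------------------------------------------------------------*/
#if 0
static void sd_t_d1_5_reference(int h1d, int h2d, int h3d, int h7d,
                                int p4d, int p5d, int p6d,
                                double *triplesx,
                                const double *t2sub, const double *v2sub) {
  for (int p6 = 0; p6 < p6d; ++p6)
    for (int p4 = 0; p4 < p4d; ++p4)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h2 = 0; h2 < h2d; ++h2)
          for (int h1 = 0; h1 < h1d; ++h1)
            for (int h3 = 0; h3 < h3d; ++h3) {
              double s = 0.0;
              for (int h7 = 0; h7 < h7d; ++h7)
                s += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]      /* t2sub[h7,p4,p5,h1] */
                   * v2sub[h3 + h3d*(h2 + h2d*(p6 + p6d*h7))];     /* v2sub[h3,h2,p6,h7] */
              /* triplesx[h3,h1,h2,p5,p4,p6] accumulates with a plus sign */
              triplesx[h3 + h3d*(h1 + h1d*(h2 + h2d*(p5 + p5d*(p4 + p4d*p6))))] += s;
            }
}
#endif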
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
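/*
 * Illustrative host-side sketch of the d1_6 update above, written from the
 * strides set up in sd_t_d1_6_cuda below (first-listed index has unit
 * stride).  Here h3d is the merged h3*h2 extent, exactly as sd_t_d1_6_cuda
 * passes it to the kernel.  The *_reference name is not part of the
 * original code; the routine is never called and is kept out of the build.
 */
#if 0
static void sd_t_d1_6_reference(int h1d, int h3d, int h7d,
                                int p4d, int p5d, int p6d,
                                double *triplesx,
                                const double *t2sub, const double *v2sub) {
  for (int p6 = 0; p6 < p6d; ++p6)
    for (int p4 = 0; p4 < p4d; ++p4)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h3 = 0; h3 < h3d; ++h3)
          for (int h1 = 0; h1 < h1d; ++h1) {
            double s = 0.0;
            for (int h7 = 0; h7 < h7d; ++h7)
              s += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]   /* t2sub[h7,p4,p5,h1] */
                 * v2sub[h3 + h3d*(p6 + p6d*h7)];             /* v2sub[h3,p6,h7]    */
            /* triplesx[h1,h3,p5,p4,p6] accumulates with a minus sign */
            triplesx[h1 + h1d*(h3 + h3d*(p5 + p5d*(p4 + p4d*p6)))] -= s;
          }
}
#endif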
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_6_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p4ld_triplesx,int p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
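/*
 * Generated-kernel pattern (shared by the sd_t_d1_* / sd_t_d2_* kernels in
 * this file): each thread block owns a 4*T2 x 4*T1 tile of the flattened
 * output space (x = uncontracted v2sub indices, y = uncontracted t2sub
 * indices); the contracted index (h7 here) is walked in chunks of Tcomm
 * that are staged through the t2sub_shm / v2sub_shm shared-memory buffers;
 * each thread accumulates a 4x4 register tile (tlocal1..tlocal16) and
 * commits it to triplesx_d after the contraction loop, with the branch
 * ladder below guarding the ragged tile edges.
 */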
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_6_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d; /* fold h2 into h3: the kernel and the strides below treat [h3,h2] as one merged index of extent h3d*h2d */
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h1d*h3d*p5d*p4d*p6d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_6_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p4ld_triplesx=p5d*h3d*h1d;
p6ld_triplesx=p4d*p5d*h3d*h1d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
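/*
 * Illustrative host-side sketch of the d1_7 update above (strides as set up
 * in sd_t_d1_7_cuda below; first-listed index has unit stride; h3d is the
 * merged h3*h2 extent passed by the wrapper).  The *_reference name is not
 * part of the original code; the routine is never called and is kept out of
 * the build.
 */
#if 0
static void sd_t_d1_7_reference(int h1d, int h3d, int h7d,
                                int p4d, int p5d, int p6d,
                                double *triplesx,
                                const double *t2sub, const double *v2sub) {
  for (int p4 = 0; p4 < p4d; ++p4)
    for (int p6 = 0; p6 < p6d; ++p6)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h1 = 0; h1 < h1d; ++h1)
          for (int h3 = 0; h3 < h3d; ++h3) {
            double s = 0.0;
            for (int h7 = 0; h7 < h7d; ++h7)
              s += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]   /* t2sub[h7,p4,p5,h1] */
                 * v2sub[h3 + h3d*(p6 + p6d*h7)];             /* v2sub[h3,p6,h7]    */
            /* triplesx[h3,h1,p5,p6,p4] accumulates with a plus sign */
            triplesx[h3 + h3d*(h1 + h1d*(p5 + p5d*(p6 + p6d*p4)))] += s;
          }
}
#endif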
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_7_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_7_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d; /* fold h2 into h3: the kernel treats the pair as one merged index of extent h3d*h2d */
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_7_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
p5ld_triplesx=h1d*h3d;
p6ld_triplesx=p5d*h1d*h3d;
p4ld_triplesx=p6d*p5d*h1d*h3d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7]
*----------------------------------------------------------------------*/
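/*
 * Illustrative host-side sketch of the d1_8 update above (strides as set up
 * in sd_t_d1_8_cuda below; first-listed index has unit stride; h2 is kept
 * separate here, unlike the d1_6/d1_7/d1_9 wrappers).  The *_reference name
 * is not part of the original code; the routine is never called and is kept
 * out of the build.
 */
#if 0
static void sd_t_d1_8_reference(int h1d, int h2d, int h3d, int h7d,
                                int p4d, int p5d, int p6d,
                                double *triplesx,
                                const double *t2sub, const double *v2sub) {
  for (int p4 = 0; p4 < p4d; ++p4)
    for (int p6 = 0; p6 < p6d; ++p6)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h2 = 0; h2 < h2d; ++h2)
          for (int h1 = 0; h1 < h1d; ++h1)
            for (int h3 = 0; h3 < h3d; ++h3) {
              double s = 0.0;
              for (int h7 = 0; h7 < h7d; ++h7)
                s += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]      /* t2sub[h7,p4,p5,h1] */
                   * v2sub[h3 + h3d*(h2 + h2d*(p6 + p6d*h7))];     /* v2sub[h3,h2,p6,h7] */
              /* triplesx[h3,h1,h2,p5,p6,p4] accumulates with a minus sign */
              triplesx[h3 + h3d*(h1 + h1d*(h2 + h2d*(p5 + p5d*(p6 + p6d*p4))))] -= s;
            }
}
#endif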
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_8_kernel(int h1d,int h2d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int h2ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h3ld_triplesx,int h1ld_triplesx,int h2ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h2_0=rest_x%h2d;
rest_x=rest_x/h2d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h2_1=rest_x%h2d;
rest_x=rest_x/h2d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h2_2=rest_x%h2d;
rest_x=rest_x/h2d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h2_3=rest_x%h2d;
rest_x=rest_x/h2d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_8_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h3d*h1d*h2d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*h2d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_8_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
h2ld_v2sub=h3d;
p6ld_v2sub=h2d*h3d;
h7ld_v2sub=p6d*h2d*h3d;
h3ld_triplesx=1;
h1ld_triplesx=h3d;
h2ld_triplesx=h1d*h3d;
p5ld_triplesx=h2d*h1d*h3d;
p6ld_triplesx=p5d*h2d*h1d*h3d;
p4ld_triplesx=p6d*p5d*h2d*h1d*h3d;
int total_x = h3d*h2d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7]
*----------------------------------------------------------------------*/
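/*
 * Illustrative host-side sketch of the d1_9 update above (strides as set up
 * in sd_t_d1_9_cuda below; first-listed index has unit stride; h3d is the
 * merged h3*h2 extent passed by the wrapper).  The *_reference name is not
 * part of the original code; the routine is never called and is kept out of
 * the build.
 */
#if 0
static void sd_t_d1_9_reference(int h1d, int h3d, int h7d,
                                int p4d, int p5d, int p6d,
                                double *triplesx,
                                const double *t2sub, const double *v2sub) {
  for (int p4 = 0; p4 < p4d; ++p4)
    for (int p6 = 0; p6 < p6d; ++p6)
      for (int p5 = 0; p5 < p5d; ++p5)
        for (int h3 = 0; h3 < h3d; ++h3)
          for (int h1 = 0; h1 < h1d; ++h1) {
            double s = 0.0;
            for (int h7 = 0; h7 < h7d; ++h7)
              s += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]   /* t2sub[h7,p4,p5,h1] */
                 * v2sub[h3 + h3d*(p6 + p6d*h7)];             /* v2sub[h3,p6,h7]    */
            /* triplesx[h1,h3,p5,p6,p4] accumulates with a plus sign */
            triplesx[h1 + h1d*(h3 + h3d*(p5 + p5d*(p6 + p6d*p4)))] += s;
          }
}
#endif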
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d1_9_kernel(int h1d,int h3d,int h7d,int p4d,int p5d,int p6d,int h7ld_t2sub,int p4ld_t2sub,int p5ld_t2sub,int h1ld_t2sub,int h3ld_v2sub,int p6ld_v2sub,int h7ld_v2sub,int h1ld_triplesx,int h3ld_triplesx,int p5ld_triplesx,int p6ld_triplesx,int p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,h7l,h7T;
__shared__ double t2sub_shm[4*T1][Tcomm];
__shared__ double v2sub_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p5_0=rest_y%p5d;
rest_y=rest_y/p5d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p5_1=rest_y%p5d;
rest_y=rest_y/p5d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p5_2=rest_y%p5d;
rest_y=rest_y/p5d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p5_3=rest_y%p5d;
rest_y=rest_y/p5d;
p4_3=rest_y;
p6_3=rest_x;
int t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){int h7l_hi;
h7l_hi = MIN(Tcomm+h7T,h7d)-h7T;
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub;
v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub;
if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub;
v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub;
if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub;
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub;
if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub;
v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub;
if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){
h7=h7l+h7T;
t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub];
}
if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){
h7=h7l+h7T;
v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub];
}
__syncthreads();
for(h7l=0;h7l<h7l_hi;++h7l){
a1=t2sub_shm[in1_idxl+T1*0][h7l];
a2=t2sub_shm[in1_idxl+T1*1][h7l];
a3=t2sub_shm[in1_idxl+T1*2][h7l];
a4=t2sub_shm[in1_idxl+T1*3][h7l];
b1=v2sub_shm[h7l][in2_idxl+T2*0];
b2=v2sub_shm[h7l][in2_idxl+T2*1];
b3=v2sub_shm[h7l][in2_idxl+T2*2];
b4=v2sub_shm[h7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d1_9_cuda(int h1d, int h2d, int h3d, int h7d, int p4d, int p5d, int p6d, double *triplesx, double *t2sub, double *v2sub) {
h3d=h3d*h2d; /* fold h2 into h3: the kernel treats the pair as one merged index of extent h3d*h2d */
size_t stream;
size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx;
size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub;
cudaStream_t *streams;
size_t nstreams,i;
double *triplesx_d,*t2sub_d,*v2sub_d,*triplesx_p;
size_triplesx=h1d*h3d*p5d*p6d*p4d*sizeof(double);
size_t2sub=h7d*p4d*p5d*h1d*sizeof(double);
size_v2sub=h3d*p6d*h7d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d1_9_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_triplesx=size_triplesx/nstreams;
size_el_block_triplesx=size_block_triplesx/sizeof(double);
t2sub_d=(double*)getGpuMem(size_t2sub);
v2sub_d=(double*)getGpuMem(size_v2sub);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice));
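/* The *ld_* variables below are linearized strides ("leading dimensions"):
 * the first index named in each tensor (h7 in t2sub, h3 in v2sub, h1 in
 * triplesx) is fastest with stride 1, and an element is addressed as the dot
 * product of its indices with these strides, exactly as the kernel does. */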
h7ld_t2sub=1;
p4ld_t2sub=h7d;
p5ld_t2sub=p4d*h7d;
h1ld_t2sub=p5d*p4d*h7d;
h3ld_v2sub=1;
p6ld_v2sub=h3d;
h7ld_v2sub=p6d*h3d;
h1ld_triplesx=1;
h3ld_triplesx=h1d;
p5ld_triplesx=h3d*h1d;
p6ld_triplesx=p5d*h3d*h1d;
p4ld_triplesx=p6d*p5d*h3d*h1d;
int total_x = h3d*p6d*1;
int total_y = p4d*p5d*h1d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
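/* Each block covers a (4*T2) x (4*T1) tile of the flattened output space and
 * every thread keeps a 4x4 register tile (tlocal1..tlocal16 in the kernel).
 * The contraction is accumulated straight into t3_d, which appears to be the
 * persistent device-resident t3 buffer managed elsewhere in this file; that is
 * why no triplesx buffer is allocated or copied here and why the local
 * triplesx_d/triplesx_p declarations above go unused. */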
for(i=0;i<nstreams;++i){
sd_t_d1_9_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);
}
freeGpuMem(t2sub_d);
freeGpuMem(v2sub_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) {
sd_t_d1_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*h7d,(int)*p4d,(int)*p5d,(int)*p6d,triplesx,t2sub,v2sub);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
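/* Illustrative only (not in the original source): a naive host-side sketch of
 * the contraction above, using the same layouts sd_t_d2_1_cuda sets up below
 * (first listed index fastest, p6d already containing the folded p5
 * dimension).  The name sd_t_d2_1_reference is ours; it is meant for
 * small-size validation, not for performance. */
static inline void sd_t_d2_1_reference(int h1d, int h2d, int h3d, int p4d,
    int p6d, int p7d, double *t3, const double *t2, const double *v2) {
  /* t3[h3,h2,h1,p6,p4] -= sum_p7 t2[p7,p4,h1,h2] * v2[p7,h3,p6] */
  for (int p4 = 0; p4 < p4d; ++p4)
    for (int p6 = 0; p6 < p6d; ++p6)
      for (int h1 = 0; h1 < h1d; ++h1)
        for (int h2 = 0; h2 < h2d; ++h2)
          for (int h3 = 0; h3 < h3d; ++h3) {
            double s = 0.0;
            for (int p7 = 0; p7 < p7d; ++p7)
              s += t2[p7 + p7d*(p4 + p4d*(h1 + h1d*h2))]
                 * v2[p7 + p7d*(h3 + h3d*p6)];
            t3[h3 + h3d*(h2 + h2d*(h1 + h1d*(p6 + p6d*p4)))] -= s;
          }
}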
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
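/* t2_shm stages a (4*T1) x Tcomm slab of t2 and v2_shm a Tcomm x (4*T2) slab
 * of v2; the contraction index p7 is consumed Tcomm entries at a time, each
 * pass finishing with a register-blocked outer product of the two slabs. */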
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
p6d=p6d*p5d;
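/* p5 is folded into p6 up front, so the kernel only ever sees five t3 indices;
 * this presumes p6 and p5 are laid out contiguously in t3 and v2 for this
 * permutation. */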
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h3d*h2d*h1d*p6d*p4d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_1_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
int total_x = h3d*p6d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);
}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_1_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_2_kernel(int h1d,int h2d,int h3d,int p4d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
h3_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
h3_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
h3_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
h3_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
h3d=h3d*p6d;
h3d=h3d*p5d;
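/* Both p6 and p5 are folded into h3 here, so the "h3" used below is really the
 * fused (h3,p6,p5) index and v2 degenerates to a (p7 x fused-h3) matrix. */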
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_2_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
int total_x = h3d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_2_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);
}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_2_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_3_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p4_0=rest_y;
p6_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p4_1=rest_y;
p6_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p4_2=rest_y;
p6_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p4_3=rest_y;
p6_3=rest_x;
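/* Each of the four x/y slots above decodes its own tensor indices from the
 * flattened block coordinate by repeated modulo/division: y carries (h2,h1,p4)
 * and x carries (h3,p6); out-of-range slots are filtered later by the
 * total_x/total_y guards. */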
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
p6d=p6d*p5d;
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p6d*p4d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_3_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
int total_x = h3d*p6d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_3_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_3_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
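/* Write-back: blocks on the right/bottom edge may have part of their 4-wide
 * register tile beyond total_x/total_y, so each slot is stored only under the
 * matching bound check; the else-if ladder simply writes the 3-, 2- or 1-row
 * prefix of the tile. */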
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h3d*h2d*h1d*p6d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_4_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p6ld_t3=h1d*h2d*h3d;
p4ld_t3=p6d*h1d*h2d*h3d;
p5ld_t3=p4d*p6d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);
}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_4_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
h3d=h3d*p6d;
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p5d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_5_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p5ld_v2=h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p5ld_t3=p4d*h3d*h1d*h2d;
int total_x = h3d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);
dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaDeviceSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);
}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_5_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
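/* Same t2*v2 product as sd_t_d2_4 above; only the h3/h2 order in the t3 layout
 * and the sign of the update differ, consistent with antisymmetry in (h2,h3). */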
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;
for(p7T=0;p7T<p7d;p7T+=Tcomm){
int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p6d*p4d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_6_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p6ld_t3=h1d*h3d*h2d;
p4ld_t3=p6d*h1d*h3d*h2d;
p5ld_t3=p4d*p6d*h1d*h3d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_6_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
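/* For reference: a minimal, untiled host-side sketch of the contraction stated above.
   It is illustrative only (the function name and signature are not part of the original
   interface); sd_t_d2_7_kernel below computes the same sum in 4x4 register tiles with the
   p7 contraction index staged through shared memory, and the neighboring sd_t_d2_* kernels
   differ from it only in how the t3 output indices are permuted. */
static inline void sd_t_d2_7_reference_sketch(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d,
                                              double *t3, const double *t2, const double *v2)
{
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int p6 = 0; p6 < p6d; ++p6)
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h2 = 0; h2 < h2d; ++h2)
  for (int h3 = 0; h3 < h3d; ++h3) {
    double s = 0.0;
    for (int p7 = 0; p7 < p7d; ++p7)
      s += t2[p7 + p7d*(p4 + p4d*(h1 + h1d*h2))]     /* t2[p7,p4,h1,h2] */
         * v2[p7 + p7d*(h3 + h3d*(p6 + p6d*p5))];    /* v2[p7,h3,p6,p5] */
    t3[h3 + h3d*(h2 + h2d*(h1 + h1d*(p4 + p4d*(p6 + p6d*p5))))] -= s;  /* t3[h3,h2,h1,p4,p6,p5] */
  }
}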
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_7_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h3d*h2d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_7_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h3ld_t3=1;
h2ld_t3=h3d;
h1ld_t3=h2d*h3d;
p4ld_t3=h1d*h2d*h3d;
p6ld_t3=p4d*h1d*h2d*h3d;
p5ld_t3=p6d*p4d*h1d*h2d*h3d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_7_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_8_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h1ld_t3,int h3ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h1d*h3d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_8_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h1ld_t3=h2d;
h3ld_t3=h1d*h2d;
p4ld_t3=h3d*h1d*h2d;
p6ld_t3=p4d*h3d*h1d*h2d;
p5ld_t3=p6d*p4d*h3d*h1d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_8_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
/*----------------------------------------------------------------------*
*t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5]
*----------------------------------------------------------------------*/
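/* Note: unlike sd_t_d2_6/7/8 above, this permutation accumulates its contribution with +=
   into t3, matching the sign in the header comment. */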
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_d2_9_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p7d,int p7ld_t2,int p4ld_t2,int h1ld_t2,int h2ld_t2,int p7ld_v2,int h3ld_v2,int p6ld_v2,int p5ld_v2,int h2ld_t3,int h3ld_t3,int h1ld_t3,int p4ld_t3,int p6ld_t3,int p5ld_t3,double *t3d, double *t2_d, double *v2_d,int unused_idx, int total_x, int total_y) {
int h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7;
double a1,b1;
double a2,b2;
double a3,b3;
double a4,b4;
int in1_idxl,in2_idxl,p7l,p7T;
__shared__ double t2_shm[4*T1][Tcomm];
__shared__ double v2_shm[Tcomm][4*T2];
int rest_x=blockIdx.x;
int rest_y=blockIdx.y;
int thread_x = T2*4 * rest_x + threadIdx.x;
int thread_y = T1*4 * rest_y + threadIdx.y;
in1_idxl=threadIdx.y;
in2_idxl=threadIdx.x ;
double tlocal1=0;
double tlocal2=0;
double tlocal3=0;
double tlocal4=0;
double tlocal5=0;
double tlocal6=0;
double tlocal7=0;
double tlocal8=0;
double tlocal9=0;
double tlocal10=0;
double tlocal11=0;
double tlocal12=0;
double tlocal13=0;
double tlocal14=0;
double tlocal15=0;
double tlocal16=0;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0;
h2_0=rest_y%h2d;
rest_y=rest_y/h2d;
h3_0=rest_x%h3d;
rest_x=rest_x/h3d;
h1_0=rest_y%h1d;
rest_y=rest_y/h1d;
p6_0=rest_x%p6d;
rest_x=rest_x/p6d;
p4_0=rest_y;
p5_0=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1;
h2_1=rest_y%h2d;
rest_y=rest_y/h2d;
h3_1=rest_x%h3d;
rest_x=rest_x/h3d;
h1_1=rest_y%h1d;
rest_y=rest_y/h1d;
p6_1=rest_x%p6d;
rest_x=rest_x/p6d;
p4_1=rest_y;
p5_1=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2;
h2_2=rest_y%h2d;
rest_y=rest_y/h2d;
h3_2=rest_x%h3d;
rest_x=rest_x/h3d;
h1_2=rest_y%h1d;
rest_y=rest_y/h1d;
p6_2=rest_x%p6d;
rest_x=rest_x/p6d;
p4_2=rest_y;
p5_2=rest_x;
rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3;
rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3;
h2_3=rest_y%h2d;
rest_y=rest_y/h2d;
h3_3=rest_x%h3d;
rest_x=rest_x/h3d;
h1_3=rest_y%h1d;
rest_y=rest_y/h1d;
p6_3=rest_x%p6d;
rest_x=rest_x/p6d;
p4_3=rest_y;
p5_3=rest_x;
int t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){int p7l_hi;
p7l_hi = MIN(Tcomm+p7T,p7d)-p7T;
t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2;
v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2;
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2;
v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2;
if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2;
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2;
if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2];
}
t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2;
v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2;
if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){
p7=p7l+p7T;
t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2];
}
if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){
p7=p7l+p7T;
v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2];
}
__syncthreads();
for(p7l=0;p7l<p7l_hi;++p7l){
a1=t2_shm[in1_idxl+T1*0][p7l];
a2=t2_shm[in1_idxl+T1*1][p7l];
a3=t2_shm[in1_idxl+T1*2][p7l];
a4=t2_shm[in1_idxl+T1*3][p7l];
b1=v2_shm[p7l][in2_idxl+T2*0];
b2=v2_shm[p7l][in2_idxl+T2*1];
b3=v2_shm[p7l][in2_idxl+T2*2];
b4=v2_shm[p7l][in2_idxl+T2*3];
tlocal1+=a1*b1;
tlocal2+=a2*b1;
tlocal3+=a3*b1;
tlocal4+=a4*b1;
tlocal5+=a1*b2;
tlocal6+=a2*b2;
tlocal7+=a3*b2;
tlocal8+=a4*b2;
tlocal9+=a1*b3;
tlocal10+=a2*b3;
tlocal11+=a3*b3;
tlocal12+=a4*b3;
tlocal13+=a1*b4;
tlocal14+=a2*b4;
tlocal15+=a3*b4;
tlocal16+=a4*b4;
}
__syncthreads();
}
if(thread_x+T1*0<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal4;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1;
}
}
if(thread_x+T1*1<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal8;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5;
}
}
if(thread_x+T1*2<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal12;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9;
}
}
if(thread_x+T1*3<total_x){
if(thread_y+T2*3<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal16;
}
else if(thread_y+T2*2<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15;
}
else if(thread_y+T2*1<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14;
}
else if(thread_y+T2*0<total_y) {
t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13;
}
}
__syncthreads();
}
extern "C" void sd_t_d2_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, int p7d, double *t3, double *t2, double *v2) {
size_t stream;
size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3;
size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2;
cudaStream_t *streams;
size_t nstreams,i;
double *t2_d,*v2_d;
size_t3=h2d*h3d*h1d*p4d*p6d*p5d*sizeof(double);
size_t2=p7d*p4d*h1d*h2d*sizeof(double);
size_v2=p7d*h3d*p6d*p5d*sizeof(double);
cudaFuncSetCacheConfig(sd_t_d2_9_kernel, cudaFuncCachePreferShared);
nstreams=1;
size_block_t3=size_t3/nstreams;
size_el_block_t3=size_block_t3/sizeof(double);
//t3d=(double*)getGpuMem(size_t3);
t2_d=(double*)getGpuMem(size_t2);
v2_d=(double*)getGpuMem(size_v2);
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t));
assert(streams!= NULL);
for(i=0;i<nstreams;++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i])) ;
}
CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice));
p7ld_t2=1;
p4ld_t2=p7d;
h1ld_t2=p4d*p7d;
h2ld_t2=h1d*p4d*p7d;
p7ld_v2=1;
h3ld_v2=p7d;
p6ld_v2=h3d*p7d;
p5ld_v2=p6d*h3d*p7d;
h2ld_t3=1;
h3ld_t3=h2d;
h1ld_t3=h3d*h2d;
p4ld_t3=h1d*h3d*h2d;
p6ld_t3=p4d*h1d*h3d*h2d;
p5ld_t3=p6d*p4d*h1d*h3d*h2d;
int total_x = h3d*p6d*p5d;
int total_y = p4d*h1d*h2d;
dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1)));
for(i=0;i<nstreams;++i){
sd_t_d2_9_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y);
CHECK_ERR("Kernel execution failed");
}
cudaThreadSynchronize();
for(i=0;i<nstreams;++i){
cudaStreamDestroy(streams[i]);}
//freeGpuMem(t3d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
free(streams);
}
extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) {
sd_t_d2_9_cuda((int)*h1d,(int)*h2d,(int)*h3d,(int)*p4d,(int)*p5d,(int)*p6d,(int)*p7d,t3,t2,v2);
}
#define MAX_h3 64
/* IMPORTANT!!!!
   t3_d must be passed as a parameter to the kernel: a __global__ function cannot dereference
   this host-side global pointer by name, so its device address has to be handed in as an
   argument. */
__global__ void compute_energy_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, int total_size, double* t3d, double* t3_sd)
{
int h1,h2,p6,p4,p5, h3,i=0;
double e1,e2,e4,e5,e6;
// __shared__ double t2_shm[MAX_h3];
__shared__ double energy_s[T1];
__shared__ double energy2_s[T1];
double inner_fac;
int limit;
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
if(threadIdx.x==0)
{
energy[blockIdx.x]=0;
energy[blockIdx.x+gridDim.x]=0;
energy_s[threadIdx.x] = 0.0;
energy2_s[threadIdx.x] = 0.0;
}
for(int j =0; j<T2*T1;j++) {
thread_x = T2*T1*blockIdx.x + j;
rest_x = thread_x;
__syncthreads();
h2=rest_x%h2d;
rest_x=rest_x/h2d;
h1=rest_x%h1d;
rest_x=rest_x/h1d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
rest_x=rest_x/p5d;
p4=rest_x%p4d;
e1 = eval1[h1];
e2 = eval2[h2];
e4 = eval4[p4];
e5 = eval5[p5];
e6 = eval6[p6];
/*
for(p4=0;p4<p4d;p4++)
for(p5 = 0;p5<p5d;p5++)
for(p6=0;p6<p6d;p6++)
for(h1= 0;h1<h1d;h1++)
for(h2=0;h2<h2d;h2++)
for(h3=0;h3<h3d;h3++) {
inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1]
+eval2[h2]+eval3[h3];
energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac;
energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac;
i++;
}
*/
if(thread_x<total_size)
for(int i=0;i<h3d;i++)
{
inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i];
//ckbn avoid e1 in case we need just (T)
energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac;
energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac;
}
__syncthreads();
}
if(threadIdx.x==0)
{
/* limit = blockDim.x;
if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x;
for(int i=0;i<limit;i++)
{
energy[blockIdx.x]+=energy_s[i];
energy[blockIdx.x+gridDim.x]+=energy2_s[i];
}
*/
energy[blockIdx.x] = energy_s[0];
energy[blockIdx.x+gridDim.x] = energy2_s[0];
}
__syncthreads();
}
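/* For reference: an illustrative host-side sketch (name and signature are not part of the
   original interface) of the two quantities that compute_energy_kernel accumulates and the
   host wrapper below sums over blocks:
     energy[0] = factor * sum_i t3d[i]^2 / D
     energy[1] = factor * sum_i t3d[i] * (t3d[i] + t3_sd[i]) / D
   with D = eval1[h1] + eval2[h2] + eval3[h3] - eval4[p4] - eval5[p5] - eval6[p6], and i
   running over t3 in the layout used above (h3 fastest, then h2, h1, p6, p5, p4). */
static inline void compute_energy_reference_sketch(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d,
                                                   const double *eval1, const double *eval2, const double *eval3,
                                                   const double *eval4, const double *eval5, const double *eval6,
                                                   double factor, const double *t3d, const double *t3_sd,
                                                   double *energy /* energy[0] and energy[1] */)
{
  energy[0] = 0.0;
  energy[1] = 0.0;
  size_t i = 0;
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int p5 = 0; p5 < p5d; ++p5)
  for (int p6 = 0; p6 < p6d; ++p6)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h2 = 0; h2 < h2d; ++h2)
  for (int h3 = 0; h3 < h3d; ++h3, ++i) {
    double D = eval1[h1] + eval2[h2] + eval3[h3] - eval4[p4] - eval5[p5] - eval6[p6];
    energy[0] += factor * t3d[i] * t3d[i] / D;
    energy[1] += factor * t3d[i] * (t3d[i] + t3_sd[i]) / D;
  }
}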
extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,int h1d, int h2d, int h3d, int p4d, int p5d,int p6d, double* host1, double* host2)
//ckbn en_comment, double* total_d, double* total_s)
{
double* energy_d, *energy_h;
double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6;
int size_energy = 2*sizeof(double);
int total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1));
// int total_block = 1;
int total_elements = h1d*h2d*p4d*p5d*p6d;
energy_d = (double*)getGpuMem(size_energy*total_block*2);
int i=0,in;
double* t3 = (double*)malloc(sizeof(double)*h3d*total_elements);
double* ts3 = (double*)malloc(sizeof(double)*h3d*total_elements);
energy_h = (double*)getHostMem(size_energy*2*total_block);
eval_d1 = (double*)getGpuMem(h1d*sizeof(double));
eval_d2 = (double*)getGpuMem(h2d*sizeof(double));
eval_d3 = (double*)getGpuMem(h3d*sizeof(double));
eval_d4 = (double*)getGpuMem(p4d*sizeof(double));
eval_d5 = (double*)getGpuMem(p5d*sizeof(double));
eval_d6 = (double*)getGpuMem(p6d*sizeof(double));
CUDA_SAFE(cudaMemcpy(eval_d1, eval1, h1d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d2, eval2, h2d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d3, eval3, h3d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d4, eval4, p4d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d5, eval5, p5d*sizeof(double), cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(eval_d6, eval6, p6d*sizeof(double), cudaMemcpyHostToDevice));
/* for test only */
//printf("host 2 is %f %f\n", host2[0], host2[1]);
// CUDA_SAFE(cudaMemcpy(t3_s_d, host2, total_elements*h3d*sizeof(double), cudaMemcpyHostToDevice));
dim3 dimBlock(1); /* one thread per block; the kernel itself loops over the T2*T1 elements of the block */
dim3 dimGrid(total_block);
compute_energy_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d);
cudaThreadSynchronize();
//CHECK_ERR("Kernel execution failed");
CUDA_SAFE(cudaMemcpy(((char *) energy_h) , ((char *) energy_d) ,
size_energy*total_block*2, cudaMemcpyDeviceToHost));
for(int i=1;i<dimGrid.x;i++)
{
energy_h[0]+=energy_h[i];
energy_h[dimGrid.x]+=energy_h[i+dimGrid.x];
}
// printf("CUDA energy_h is %f %f %d %d %d %d %d %d\n", energy_h[0], energy_h[dimGrid.x]); //, total_size, h1d, h2d, p4d, p5d,p6d);
/*
CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_d) , sizeof(double)*h3d*total_elements, cudaMemcpyDeviceToHost));
CUDA_SAFE(cudaMemcpy(((char *) ts3) , ((char *) t3_s_d) , sizeof(double)*h3d*total_elements, cudaMemcpyDeviceToHost));
total_s[0]=0.0, total_d[0]=0.0;
for(int i=0;i<h3d*total_elements;i++) {
total_s[0] += ts3[i];
total_d[0] += t3[i];
}
*/
// printf("Total doubles and singles %f, %f\n", total_d, total_s);
energy[0] = energy_h[0];
energy[1] = energy_h[dimGrid.x];
freeGpuMem(energy_d);
freeGpuMem(eval_d1);
freeGpuMem(eval_d2);
freeGpuMem(eval_d3);
freeGpuMem(eval_d4);
freeGpuMem(eval_d5);
freeGpuMem(eval_d6);
freeHostMem(energy_h);
free(t3);
free(ts3);
}
extern "C" void
compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* host2)
//ckbn en_comment,double* total_d, double* total_s)
{
compute_energy((double) *factor, energy, eval1,eval2, eval3, eval4, eval5, eval6,(int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, host1, host2);
//ckbn en_comment ,total_d, total_s);
}
//__device__ double* t3_d;
extern "C" void set_dev_mem_s(int h1d, int h2d, int h3d, int p4d, int p5d,int p6d)
{
int size_t3;
size_t3 = h1d*h2d*h3d*p4d*p5d*p6d;
t3_s_d = (double *) getGpuMem(size_t3*sizeof(double));
cudaMemset(t3_s_d,0,size_t3*sizeof(double));
}
extern "C" void
dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d)
{
set_dev_mem_s((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d);
}
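/* Data flow of the singles buffer t3_s_d (inferred from this file; the exact call order is
   driven by the Fortran TCE driver and is an assumption here): dev_mem_s_ allocates and
   zeroes it, the sd_t_s1_* wrappers below accumulate into it, and compute_en_ reads it
   together with the doubles buffer t3_d when forming the two energy contributions. */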
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
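/* For reference: a minimal, untiled host-side sketch of the singles contraction stated above
   (illustrative only; the function name and signature are not part of the original interface).
   sd_t_s1_1_kernel fuses p5 and p6 into a single index at launch time and accumulates with +=
   into the separate singles buffer t3_s_d. */
static inline void sd_t_s1_1_reference_sketch(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d,
                                              double *t3, const double *t2, const double *v2)
{
  int p56d = p5d * p6d;                                     /* fused p5*p6 index */
  for (int p4 = 0; p4 < p4d; ++p4)
  for (int p56 = 0; p56 < p56d; ++p56)
  for (int h1 = 0; h1 < h1d; ++h1)
  for (int h2 = 0; h2 < h2d; ++h2)
  for (int h3 = 0; h3 < h3d; ++h3)
    t3[h3 + h3d*(h2 + h2d*(h1 + h1d*(p56 + p56d*p4)))]      /* t3[h3,h2,h1,p6,p5,p4] */
      += t2[p4 + p4d*h1]                                    /* t2[p4,h1]             */
       * v2[h3 + h3d*(h2 + h2d*p56)];                       /* v2[h3,h2,p6,p5]       */
}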
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_1_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3, double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_1_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
//CUDA_SAFE(cudaMalloc((void**) &t3_d, size_t3));
//CUDA_SAFE(cudaMalloc((void**) &t2_d, size_t2));
//CUDA_SAFE(cudaMalloc((void**) &v2_d, size_v2));
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/*
st = timer();
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
cudaThreadSynchronize();
// CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
// cudaFree(t2_d);
// cudaFree(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_1_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_1_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_2_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t2_d, double *v2_d,int p4, int total_x, double* t3d) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_2_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}*/
//CUDA_SAFE(cudaMalloc((void**) &t2_d, size_t2));
//CUDA_SAFE(cudaMalloc((void**) &v2_d, size_v2));
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
/* assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}*/
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
// for(i=0;i<nstreams;++i){
sd_t_s1_2_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,0 /* p4 is recomputed inside the kernel */,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
// }
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
cudaThreadSynchronize();
// CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
/*
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}*/
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_2_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
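/* sd_t_s1_3_cuda has no dedicated kernel: it reuses sd_t_s1_1_kernel and only permutes the
   t3 strides it passes (judging by the h1ld_t3/h3ld_t3/h2ld_t3 values set below, h1 becomes
   the fastest index, i.e. t3[h1,h3,h2,p6,p5,p4]); everything else, including the fused p5*p6
   launch, matches sd_t_s1_1_cuda. */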
extern "C" void
sd_t_s1_3_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p4ld_t3 = p5d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
	cudaThreadSynchronize();
//CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_3_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
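/* For reference only: the equivalent host-side update implied by the header
   above and by the strides set up in sd_t_s1_4_cuda below (a sketch, not code
   that is compiled or called here):

   for (p5 = 0; p5 < p5d; p5++)
    for (p4 = 0; p4 < p4d; p4++)
     for (p6 = 0; p6 < p6d; p6++)
      for (h1 = 0; h1 < h1d; h1++)
       for (h2 = 0; h2 < h2d; h2++)
        for (h3 = 0; h3 < h3d; h3++)
         t3[h3 + h3d*(h2 + h2d*(h1 + h1d*(p6 + p6d*(p4 + p4d*p5))))] -=
             t2[p4 + p4d*h1] * v2[h3 + h3d*(h2 + h2d*(p6 + p6d*p5))];
*/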
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
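	/* Stage the whole t2[p4,h1] tile in shared memory: every thread in the block
	   reuses all h1d*p4d entries in the accumulation loops below. */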
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_4_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
/* assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}*/
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
i=0;
// for(i=0;i<nstreams;++i){
sd_t_s1_4_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
//sd_t_s1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
// }
cudaThreadSynchronize();
/* CUDA_SAFE(cudaMemcpy(((char *) t3_p) , ((char *) t3_d) , size_block_t3, cudaMemcpyDeviceToHost));
printf("Time for Async DeviceToHost %f\n", et-st);
stream = 0;
// while (stream < nstreams) {
// while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = t3_p; //[stream * size_el_block_t3];
double *dst = t3; //[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] -= src[i];
}
// stream++;
// }
*/
// cudaThreadSynchronize();
/*
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}*/
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_4_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_5_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_5_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d ;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}
*/
cudaThreadSynchronize();
//CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_5_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_6_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6,p5;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
rest_x=rest_x/p6d;
p5=rest_x%p5d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_6_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
p5ld_v2 = p6d * h3d * h2d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p6ld_t3 = h1d * h2d * h3d;
p4ld_t3 = p6d * h1d * h2d * h3d;
p5ld_t3 = p4d * p6d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/* for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
cudaThreadSynchronize();
//CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_6_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_7_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
extern "C" void
sd_t_s1_7_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h2ld_t3 = h3d;
h1ld_t3 = h2d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
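	/* In this permutation p5 and p6 are adjacent in memory for both v2 and t3,
	   so the two indices are fused and the kernel is launched with a single
	   "p6" extent of p5d*p6d. */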
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
cudaThreadSynchronize();
//CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
#undef T1
#undef T2
#undef Tcomm
extern "C" void
sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_7_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
#define T1 16
#define T2 16
#define Tcomm 16
__global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
}
/*----------------------------------------------------------------------*
*t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
#define T1 16
#define T2 16
#define Tcomm 16
extern "C" void
sd_t_s1_8_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h3ld_t3 = 1;
h1ld_t3 = h3d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
cudaThreadSynchronize();
// CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
// freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_8_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
/*----------------------------------------------------------------------*
*t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5]
*----------------------------------------------------------------------*/
extern "C" void
sd_t_s1_9_cuda(int h1d, int h2d, int h3d, int p4d, int p5d, int p6d, double *t3, double *t2, double *v2)
{
double st, et;
//ckbn st = timer();
size_t stream;
size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2,
p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3,
p5ld_t3, p4ld_t3;
size_t size_t3, size_block_t3, size_el_block_t3, size_t2,
size_v2;
cudaStream_t *streams;
size_t nstreams, i;
double *t2_d, *v2_d, *t3_p;
size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double);
size_t2 = p4d * h1d * sizeof(double);
size_v2 = h3d * h2d * p6d * p5d * sizeof(double);
nstreams = 1;
size_block_t3 = size_t3 / nstreams;
size_el_block_t3 = size_block_t3 / sizeof(double);
/* if(first==1)
{
t3_d = (double *) getGpuMem(size_t3);
cudaMemset(t3_d,0,size_t3*sizeof(double));
first = 0;
}
*/
// t3_d = (double *) getGpuMem(size_t3);
t2_d = (double *) getGpuMem(size_t2);
v2_d = (double *) getGpuMem(size_v2);
t3_p = (double *) getHostMem(size_t3);
streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t));
assert(streams != NULL);
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaStreamCreate(&streams[i]));
}
CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice));
CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice));
p4ld_t2 = 1;
h1ld_t2 = p4d;
h3ld_v2 = 1;
h2ld_v2 = h3d;
p6ld_v2 = h3d * h2d;
// p5ld_v2 = p6d * h3d * p7d;
h1ld_t3 = 1;
h3ld_t3 = h1d;
h2ld_t3 = h1d * h3d;
p4ld_t3 = h1d * h2d * h3d;
// p5ld_t3 = p6d * h1d * h2d * h3d;
p6ld_t3 = p4d * h1d * h2d * h3d;
int total_x = h3d*h2d*p6d*p5d;
dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1);
for(i=0;i<nstreams;++i){
sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x);
CHECK_ERR("Kernel execution failed");
}
/*
for (i = 0; i < nstreams; ++i) {
CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i]));
}
stream = 0;
while (stream < nstreams) {
while (cudaStreamQuery(streams[stream]) != cudaSuccess);
double *src = &t3_p[stream * size_el_block_t3];
double *dst = &t3[stream * size_el_block_t3];
for (i = 0; i < size_el_block_t3; ++i) {
dst[i] = src[i];
}
stream++;
}*/
cudaThreadSynchronize();
//CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost));
// printf("out is %lf\n", t3_p[0]);
for (i = 0; i < nstreams; ++i) {
cudaStreamDestroy(streams[i]);
}
//freeGpuMem(t3_d);
freeGpuMem(t2_d);
freeGpuMem(v2_d);
freeHostMem(t3_p);
free(streams);
}
extern "C" void
sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2)
{
sd_t_s1_9_cuda((int) *h1d, (int) *h2d, (int) *h3d, (int) *p4d, (int) *p5d, (int) *p6d, t3, t2, v2);
}
|
39b6b2c77949a247ef50a4c2b3409d7304107f3e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define IND(i, j) ((i) * (N + 2) + (j))
enum {
N = 1024,
ITERS_MAX = 1 << 10,
BLOCK_SIZE = 16
};
typedef uint8_t cell_t;
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
__global__ void copy_ghost_rows(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i <= n + 1) {
// Bottom ghost row: [N + 1][0..N + 1] <== [1][0..N + 1]
grid[IND(N + 1, i)] = grid[IND(1, i)];
// Top ghost row: [0][0..N + 1] <== [N][0..N + 1]
grid[IND(0, i)] = grid[IND(N, i)];
}
}
__global__ void copy_ghost_cols(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
if (i <= n) {
// Right ghost column: [1..N][N + 1] <== [1..N][1]
grid[IND(i, N + 1)] = grid[IND(i, 1)];
// Left ghost column: [1..N][1] <== [1..N][N]
grid[IND(i, 0)] = grid[IND(i, N)];
}
}
__global__ void update_cells(cell_t *grid, cell_t *newgrid, int n)
{
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if (i <= n && j <= n) {
int states[2][9] = {
{0, 0, 0, 1, 0, 0, 0, 0, 0}, /* New states for a dead cell */
{0, 0, 1, 1, 0, 0, 0, 0, 0} /* New states for an alive cell */
};
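		// states[s][n] is the next state of a cell with current state s and n live
		// neighbours: a dead cell is born only with exactly 3 live neighbours and a
		// live cell survives with 2 or 3 (Conway's rules).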
int nneibs = grid[IND(i + 1, j)] + grid[IND(i - 1, j)] +
grid[IND(i, j + 1)] + grid[IND(i, j - 1)] +
grid[IND(i + 1, j + 1)] + grid[IND(i - 1, j - 1)] +
grid[IND(i - 1, j + 1)] + grid[IND(i + 1, j - 1)];
cell_t state = grid[IND(i, j)];
newgrid[IND(i, j)] = states[state][nneibs];
}
}
int main(int argc, char* argv[])
{
// Grid with periodic boundary conditions (ghost cells)
size_t ncells = (N + 2) * (N + 2);
size_t size = sizeof(cell_t) * ncells;
cell_t *grid = (cell_t *)malloc(size);
// Initial population
srand(0);
for (int i = 1; i <= N; i++)
for (int j = 1; j <= N; j++)
grid[IND(i, j)] = rand() % 2;
cell_t *d_grid, *d_newgrid;
double tmem = -wtime();
hipMalloc((void **)&d_grid, size);
hipMalloc((void **)&d_newgrid, size);
hipMemcpy(d_grid, grid, size, hipMemcpyHostToDevice);
tmem += wtime();
// 1d grids for copying ghost cells
dim3 block(BLOCK_SIZE, 1, 1);
dim3 cols_grid((N + block.x - 1) / block.x, 1, 1);
dim3 rows_grid((N + 2 + block.x - 1) / block.x, 1, 1);
// 2d grid for updating cells: one thread per cell
dim3 block2d(BLOCK_SIZE, BLOCK_SIZE, 1);
int nblocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid2d(nblocks, nblocks, 1);
double t = wtime();
int iter = 0;
for (iter = 0; iter < ITERS_MAX; iter++) {
// Copy ghost cells
hipLaunchKernelGGL(( copy_ghost_cols), dim3(cols_grid), dim3(block), 0, 0, d_grid, N);
hipLaunchKernelGGL(( copy_ghost_rows), dim3(rows_grid), dim3(block), 0, 0, d_grid, N);
// Update cells
hipLaunchKernelGGL(( update_cells), dim3(grid2d), dim3(block2d), 0, 0, d_grid, d_newgrid, N);
// Swap grids
cell_t *p = d_grid;
d_grid = d_newgrid;
d_newgrid = p;
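		// d_grid and d_newgrid form a double buffer: swapping the pointers avoids
		// copying the whole grid back each iteration.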
}
hipDeviceSynchronize();
t = wtime() - t;
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
tmem -= wtime();
hipMemcpy(grid, d_grid, size, hipMemcpyDeviceToHost);
tmem += wtime();
/*
for (int i = 0; i < N + 2; i++) {
for (int j = 0; j < N + 2; j++)
printf("%1d ", grid[IND(i, j)]);
printf("\n");
}
*/
size_t total = 0;
for (int i = 1; i <= N; i++) {
for (int j = 1; j <= N; j++)
total += grid[IND(i, j)];
}
printf("Game of Life: N = %d, iterations = %d\n", N, iter);
printf("Total alive cells: %lu\n", total);
printf("Iterations time (sec.): %.6f\n", t);
printf("GPU memory ops. time (sec.): %.6f\n", tmem);
printf("Iters per sec.: %.2f\n", iter / t);
printf("Total time (sec.): %.6f\n", t + tmem);
free(grid);
hipFree(d_grid);
hipFree(d_newgrid);
return 0;
}
|
39b6b2c77949a247ef50a4c2b3409d7304107f3e.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define IND(i, j) ((i) * (N + 2) + (j))
enum {
N = 1024,
ITERS_MAX = 1 << 10,
BLOCK_SIZE = 16
};
typedef uint8_t cell_t;
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
__global__ void copy_ghost_rows(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i <= n + 1) {
// Bottom ghost row: [N + 1][0..N + 1] <== [1][0..N + 1]
grid[IND(N + 1, i)] = grid[IND(1, i)];
// Top ghost row: [0][0..N + 1] <== [N][0..N + 1]
grid[IND(0, i)] = grid[IND(N, i)];
}
}
__global__ void copy_ghost_cols(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
if (i <= n) {
// Right ghost column: [1..N][N + 1] <== [1..N][1]
grid[IND(i, N + 1)] = grid[IND(i, 1)];
// Left ghost column: [1..N][1] <== [1..N][N]
grid[IND(i, 0)] = grid[IND(i, N)];
}
}
__global__ void update_cells(cell_t *grid, cell_t *newgrid, int n)
{
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if (i <= n && j <= n) {
int states[2][9] = {
{0, 0, 0, 1, 0, 0, 0, 0, 0}, /* New states for a dead cell */
{0, 0, 1, 1, 0, 0, 0, 0, 0} /* New states for an alive cell */
};
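		// Row 0: a dead cell is born only with exactly 3 live neighbours;
		// row 1: a live cell survives with 2 or 3 (Conway's rules).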
int nneibs = grid[IND(i + 1, j)] + grid[IND(i - 1, j)] +
grid[IND(i, j + 1)] + grid[IND(i, j - 1)] +
grid[IND(i + 1, j + 1)] + grid[IND(i - 1, j - 1)] +
grid[IND(i - 1, j + 1)] + grid[IND(i + 1, j - 1)];
cell_t state = grid[IND(i, j)];
newgrid[IND(i, j)] = states[state][nneibs];
}
}
int main(int argc, char* argv[])
{
// Grid with periodic boundary conditions (ghost cells)
size_t ncells = (N + 2) * (N + 2);
size_t size = sizeof(cell_t) * ncells;
cell_t *grid = (cell_t *)malloc(size);
// Initial population
srand(0);
for (int i = 1; i <= N; i++)
for (int j = 1; j <= N; j++)
grid[IND(i, j)] = rand() % 2;
cell_t *d_grid, *d_newgrid;
double tmem = -wtime();
cudaMalloc((void **)&d_grid, size);
cudaMalloc((void **)&d_newgrid, size);
cudaMemcpy(d_grid, grid, size, cudaMemcpyHostToDevice);
tmem += wtime();
// 1d grids for copying ghost cells
dim3 block(BLOCK_SIZE, 1, 1);
dim3 cols_grid((N + block.x - 1) / block.x, 1, 1);
dim3 rows_grid((N + 2 + block.x - 1) / block.x, 1, 1);
// 2d grid for updating cells: one thread per cell
dim3 block2d(BLOCK_SIZE, BLOCK_SIZE, 1);
int nblocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 grid2d(nblocks, nblocks, 1);
double t = wtime();
int iter = 0;
for (iter = 0; iter < ITERS_MAX; iter++) {
// Copy ghost cells
copy_ghost_cols<<<cols_grid, block>>>(d_grid, N);
copy_ghost_rows<<<rows_grid, block>>>(d_grid, N);
// Update cells
update_cells<<<grid2d, block2d>>>(d_grid, d_newgrid, N);
// Swap grids
cell_t *p = d_grid;
d_grid = d_newgrid;
d_newgrid = p;
}
cudaDeviceSynchronize();
t = wtime() - t;
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
tmem -= wtime();
cudaMemcpy(grid, d_grid, size, cudaMemcpyDeviceToHost);
tmem += wtime();
/*
for (int i = 0; i < N + 2; i++) {
for (int j = 0; j < N + 2; j++)
printf("%1d ", grid[IND(i, j)]);
printf("\n");
}
*/
size_t total = 0;
for (int i = 1; i <= N; i++) {
for (int j = 1; j <= N; j++)
total += grid[IND(i, j)];
}
printf("Game of Life: N = %d, iterations = %d\n", N, iter);
printf("Total alive cells: %lu\n", total);
printf("Iterations time (sec.): %.6f\n", t);
printf("GPU memory ops. time (sec.): %.6f\n", tmem);
printf("Iters per sec.: %.2f\n", iter / t);
printf("Total time (sec.): %.6f\n", t + tmem);
free(grid);
cudaFree(d_grid);
cudaFree(d_newgrid);
return 0;
}
|
a2d59b8fec6e84f4f7379360523e01537490151c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cuConvertHSVToRGBKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float4 *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float4 *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
size_t stride = 2;
int width = XSIZE;
int height = YSIZE;
bool denormalize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
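// The image dimensions are rounded up to multiples of the block dimensions so
// that gridBlock x threadBlock covers every pixel of the original XSIZE x YSIZE image.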
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cuConvertHSVToRGBKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, stride, width, height, denormalize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cuConvertHSVToRGBKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, stride, width, height, denormalize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cuConvertHSVToRGBKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, stride, width, height, denormalize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a2d59b8fec6e84f4f7379360523e01537490151c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cuConvertHSVToRGBKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float4 *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float4 *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
size_t stride = 2;
int width = XSIZE;
int height = YSIZE;
bool denormalize = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cuConvertHSVToRGBKernel<<<gridBlock,threadBlock>>>(src,dst,stride,width,height,denormalize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cuConvertHSVToRGBKernel<<<gridBlock,threadBlock>>>(src,dst,stride,width,height,denormalize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cuConvertHSVToRGBKernel<<<gridBlock,threadBlock>>>(src,dst,stride,width,height,denormalize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
175448f780e835f483bc3668262167d71e27cb64.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Reduction operations
* This all seems intimidating, but it's pretty directly adapted from
* http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
*/
#include "collectiveops_cuda_generic.cuh"
#include "gpu/cuda/core/dconsts_core.cuh"
#include "gpu/cuda/core/errorhandler_cuda.cuh"
#include "utils/utils.h"//For templated max/min/sum
#define COL_THREADS_X (32) //TODO read from config
#define COL_THREADS_Y (4)
#define COL_ELEMS_PER_THREAD (1) //TODO ifndef, define here, else get from the compile flag
//Comparison funcs (template magic :D)
template<class T>
__device__ T dmax(const T a, const T b) { return a > b ? a : b; }
template<class T>
__device__ T dmin(const T a, const T b) { return a < b ? a : b; }
template<class T>
__device__ T dsum(const T a, const T b) { return a + b; }
//The initial values when starting the reduction
template<class T>
__device__ T ddist(const T a, const T b, const T c) { return sqrt(a*a + b*b + c*c); }
template<class T>
__device__ T dsqrsum(const T a, const T b, const T c) { return a*a + b*b + c*c; }
template<class T>
__device__ T dexpsqrscal(const T a, const T b, const T c) { return exp(a)*exp(a); }
template<class T>
__device__ T dscal(const T a, const T b, const T c) { return a; }
//Function pointer definitions
typedef real (*ReduceFunc)(real, real);
typedef real (*ReduceInitFunc)(real, real, real);
template<ReduceFunc reduce_op>
__device__ void reduce_warp(volatile real* shared, int tid)
{
shared[tid] = reduce_op(shared[tid], shared[tid+32]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+16]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+8]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+4]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+2]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+1]);__syncthreads();
}
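/* Tree-reduce the final 64 partial results held in shared memory; the volatile
   pointer makes each step re-read the values just written by neighbouring
   threads instead of using stale register copies. */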
void init_reduction_array_cuda_generic(ReductionArray* reduct_arr, CParamConfig* cparams)
{
// The device variable where the maximum value found is written to
CUDA_ERRCHK( hipMalloc((real**) &reduct_arr->d_vec_res, sizeof(real)) );
//An intermediate array used in computing the reduction
dim3 bpg;
bpg.x = ceil(cparams->nx / (double)COL_THREADS_X);
bpg.y = ceil(cparams->ny / (double)COL_THREADS_Y);
bpg.z = ceil(cparams->nz / (double)COL_ELEMS_PER_THREAD);
const int blocks_total = bpg.x * bpg.y * bpg.z;
CUDA_ERRCHK( hipMalloc((real**) &reduct_arr->d_partial_result, sizeof(real)*blocks_total) );
}
void destroy_reduction_array_cuda_generic(ReductionArray* reduct_arr)
{
CUDA_ERRCHK( hipFree(reduct_arr->d_vec_res) ); reduct_arr->d_vec_res = NULL;
CUDA_ERRCHK( hipFree(reduct_arr->d_partial_result) ); reduct_arr->d_partial_result = NULL;
}
template <unsigned int block_size, ReduceFunc reduce_op>
__global__ void reduce(real* dest, real* src, int problem_size)
{
int tid = threadIdx.x;
int i = tid + blockDim.x;//tid + offset
extern __shared__ real shared[];
shared[tid] = src[tid];
//Sequentially add in all elements beyond the block size
while (i < problem_size) {
shared[tid] = reduce_op(shared[tid], src[i]);
i += blockDim.x;
}
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
shared[tid] = reduce_op(shared[tid], shared[tid+512]);
__syncthreads();
}
}
if (block_size >= 512) {
if (tid < 256) {
shared[tid] = reduce_op(shared[tid], shared[tid+256]);
__syncthreads();
}
}
if (block_size >= 256) {
if (tid < 128) {
shared[tid] = reduce_op(shared[tid], shared[tid+128]);
__syncthreads();
}
}
if (block_size >= 128) {
if (tid < 64) {
shared[tid] = reduce_op(shared[tid], shared[tid+64]);
__syncthreads();
}
}
if (tid < 32)
reduce_warp<reduce_op>(shared, tid);
if (tid == 0) *dest = shared[0];
}
//Calculates the maximum vector magnitude found in the system
template <unsigned int block_size, ReduceFunc reduce_op, ReduceInitFunc reduce_init_op>//inline const T& min(const T& a, const T& b)
__global__ void reduce_initial(real* d_partial_result, real* d_vec_x, real* d_vec_y = NULL, real* d_vec_z = NULL)
{
extern __shared__ real vec_shared[];
const int tid = threadIdx.x + threadIdx.y*blockDim.x;//index inside the shared mem array
const int tx = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min;
const int ty = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
const int tz = blockIdx.z*COL_ELEMS_PER_THREAD + d_nz_min;
const int base_idx = tx + ty*d_mx + tz*d_mx*d_my;
assert(tx >= d_nx_min);
assert(tx < d_nx_max);
assert(ty >= d_ny_min);
assert(ty < d_ny_max);
assert(tz >= d_nz_min);
assert(tz < d_nz_max);
real vec;
const bool REDUCE_VEC = (d_vec_y != NULL);
if (REDUCE_VEC)
vec_shared[tid] = reduce_init_op(d_vec_x[base_idx],
d_vec_y[base_idx],
d_vec_z[base_idx]); //init first value
else
vec_shared[tid] = reduce_init_op(d_vec_x[base_idx], 0, 0);
for (int i=1; i < COL_ELEMS_PER_THREAD && tz+i < d_nz_max; i++)
{
const int grid_idx = base_idx + i*d_mx*d_my;
assert(tz+i < d_nz_max);
if (REDUCE_VEC)
vec = reduce_init_op(d_vec_x[grid_idx],
d_vec_y[grid_idx],
d_vec_z[grid_idx]); //init first value
else
vec = reduce_init_op(d_vec_x[grid_idx], 0, 0);
vec_shared[tid] = reduce_op(vec_shared[tid], vec);
}
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+512]);
__syncthreads();
}
}
if (block_size >= 512) {
if (tid < 256) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+256]);
__syncthreads();
}
}
if (block_size >= 256) {
if (tid < 128) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+128]);
__syncthreads();
}
}
if (block_size >= 128) {
if (tid < 64) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+64]);
__syncthreads();
}
}
if (tid < 32)
reduce_warp<reduce_op>(vec_shared, tid);
if (tid == 0) d_partial_result[blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y] = vec_shared[0];
}
/*
* Calculates the max vec found in the grid
* Puts the result in reduct_arr->d_vec_res (in device memory)
*/
template<ReduceFunc reduce_op, ReduceInitFunc reduce_init_op>
void reduce_cuda_generic(ReductionArray* reduct_arr, CParamConfig* cparams, real* d_vec_x, real* d_vec_y = NULL, real* d_vec_z = NULL)
{
//-------------------------------------------------
dim3 tpb, bpg;
tpb.x = COL_THREADS_X;
tpb.y = COL_THREADS_Y;
tpb.z = 1; // 2D blockdims only
const int SMEM_PER_BLOCK = tpb.x * tpb.y *tpb.z * sizeof(real);
bpg.x = ceil((real) cparams->nx / (real)COL_THREADS_X);
bpg.y = ceil((real) cparams->ny / (real)COL_THREADS_Y);
bpg.z = ceil((real) cparams->nz / (real)COL_ELEMS_PER_THREAD);
const int BLOCKS_TOTAL = bpg.x * bpg.y * bpg.z;
//------------------------------------------------
//Collectiveops works only when BLOCKS_TOTAL is divisible by the thread block size.
//This is not good and collectiveops should be rewritten to support arbitrary grid dims
assert(BLOCKS_TOTAL % (tpb.x * tpb.y * tpb.z) == 0);
switch (tpb.x*tpb.y*tpb.z)
{
case 512:
hipLaunchKernelGGL(( reduce_initial<512, reduce_op, reduce_init_op>), dim3(bpg), dim3(tpb), SMEM_PER_BLOCK, 0, reduct_arr->d_partial_result, d_vec_x, d_vec_y, d_vec_z); CUDA_ERRCHK_KERNEL(); break;
case 256:
hipLaunchKernelGGL(( reduce_initial<256, reduce_op, reduce_init_op>), dim3(bpg), dim3(tpb), SMEM_PER_BLOCK, 0, reduct_arr->d_partial_result, d_vec_x, d_vec_y, d_vec_z); CUDA_ERRCHK_KERNEL(); break;
case 128:
hipLaunchKernelGGL(( reduce_initial<128, reduce_op, reduce_init_op>), dim3(bpg), dim3(tpb), SMEM_PER_BLOCK, 0, reduct_arr->d_partial_result, d_vec_x, d_vec_y, d_vec_z); CUDA_ERRCHK_KERNEL(); break;
default:
printf("INCORRECT THREAD SIZE!\n");
exit(EXIT_FAILURE);
}
if (BLOCKS_TOTAL >= 1024) {
hipLaunchKernelGGL(( reduce<1024, reduce_op>), dim3(1), dim3(1024), 1024*sizeof(real), 0, reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 512) {
hipLaunchKernelGGL(( reduce<512, reduce_op>), dim3(1), dim3(512), 512*sizeof(real), 0, reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 256) {
hipLaunchKernelGGL(( reduce<256, reduce_op>), dim3(1), dim3(256), 256*sizeof(real), 0, reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 128) {
hipLaunchKernelGGL(( reduce<128, reduce_op>), dim3(1), dim3(128), 128*sizeof(real), 0, reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 16) {
hipLaunchKernelGGL(( reduce<16, reduce_op>), dim3(1), dim3(16), 16*sizeof(real), 0, reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else {
printf("INCORRECT BLOCKS_TOTAL (= %d) IN collectiveops.cu!\n", BLOCKS_TOTAL);
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
//We're done
return;
}
//template<ReductType t>
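/* ReductType selects both the pairwise combiner and the per-element initializer
   used above: the *_VEC_UU cases reduce the magnitude of the (d_a, d_b, d_c)
   vector field, the *_SCAL cases reduce d_a alone, and the RMS_* cases return a
   plain sum of squares here. A hypothetical call site (names illustrative only):
       real uu_max = get_reduction_cuda_generic(reduct_arr, MAX_VEC_UU, cparams,
                                                d_uu_x, d_uu_y, d_uu_z);
*/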
real get_reduction_cuda_generic(ReductionArray* reduct_arr, ReductType t, CParamConfig* cparams, real* d_a, real* d_b, real* d_c)
{
real res;
switch (t) {
case MAX_VEC_UU:
reduce_cuda_generic<dmax, ddist>(reduct_arr, cparams, d_a, d_b, d_c);
break;
case MIN_VEC_UU:
reduce_cuda_generic<dmin, ddist>(reduct_arr, cparams, d_a, d_b, d_c);
break;
case RMS_VEC_UU:
reduce_cuda_generic<dsum, dsqrsum>(reduct_arr, cparams, d_a, d_b, d_c);
break;
case MAX_SCAL:
reduce_cuda_generic<dmax, dscal>(reduct_arr, cparams, d_a);
break;
case MIN_SCAL:
reduce_cuda_generic<dmin, dscal>(reduct_arr, cparams, d_a);
break;
case RMS_SCAL:
reduce_cuda_generic<dsum, dsqrsum>(reduct_arr, cparams, d_a);
break;
case RMS_EXP:
reduce_cuda_generic<dsum, dexpsqrscal>(reduct_arr, cparams, d_a);
break;
default:
CRASH("Invalid type!");
}
hipDeviceSynchronize();
CUDA_ERRCHK( hipMemcpy(&res, (real*)reduct_arr->d_vec_res, sizeof(real), hipMemcpyDeviceToHost) );
hipDeviceSynchronize();
return res;
}
|
175448f780e835f483bc3668262167d71e27cb64.cu
|
/*
* Reduction operations
* This all seems intimidating, but it's pretty directly adapted from
* http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
*/
#include "collectiveops_cuda_generic.cuh"
#include "gpu/cuda/core/dconsts_core.cuh"
#include "gpu/cuda/core/errorhandler_cuda.cuh"
#include "utils/utils.h"//For templated max/min/sum
#define COL_THREADS_X (32) //TODO read from config
#define COL_THREADS_Y (4)
#define COL_ELEMS_PER_THREAD (1) //TODO ifndef, define here, else get from the compile flag
//Comparison funcs (template magic :D)
template<class T>
__device__ T dmax(const T a, const T b) { return a > b ? a : b; }
template<class T>
__device__ T dmin(const T a, const T b) { return a < b ? a : b; }
template<class T>
__device__ T dsum(const T a, const T b) { return a + b; }
//The initial values when starting the reduction
template<class T>
__device__ T ddist(const T a, const T b, const T c) { return sqrt(a*a + b*b + c*c); }
template<class T>
__device__ T dsqrsum(const T a, const T b, const T c) { return a*a + b*b + c*c; }
template<class T>
__device__ T dexpsqrscal(const T a, const T b, const T c) { return exp(a)*exp(a); }
template<class T>
__device__ T dscal(const T a, const T b, const T c) { return a; }
//Function pointer definitions
typedef real (*ReduceFunc)(real, real);
typedef real (*ReduceInitFunc)(real, real, real);
template<ReduceFunc reduce_op>
__device__ void reduce_warp(volatile real* shared, int tid)
{
shared[tid] = reduce_op(shared[tid], shared[tid+32]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+16]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+8]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+4]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+2]);__syncthreads();
shared[tid] = reduce_op(shared[tid], shared[tid+1]);__syncthreads();
}
void init_reduction_array_cuda_generic(ReductionArray* reduct_arr, CParamConfig* cparams)
{
// The device variable where the maximum value found is written to
CUDA_ERRCHK( cudaMalloc((real**) &reduct_arr->d_vec_res, sizeof(real)) );
//An intermediate array used in computing the reduction
dim3 bpg;
bpg.x = ceil(cparams->nx / (double)COL_THREADS_X);
bpg.y = ceil(cparams->ny / (double)COL_THREADS_Y);
bpg.z = ceil(cparams->nz / (double)COL_ELEMS_PER_THREAD);
const int blocks_total = bpg.x * bpg.y * bpg.z;
CUDA_ERRCHK( cudaMalloc((real**) &reduct_arr->d_partial_result, sizeof(real)*blocks_total) );
}
void destroy_reduction_array_cuda_generic(ReductionArray* reduct_arr)
{
CUDA_ERRCHK( cudaFree(reduct_arr->d_vec_res) ); reduct_arr->d_vec_res = NULL;
CUDA_ERRCHK( cudaFree(reduct_arr->d_partial_result) ); reduct_arr->d_partial_result = NULL;
}
template <unsigned int block_size, ReduceFunc reduce_op>
__global__ void reduce(real* dest, real* src, int problem_size)
{
int tid = threadIdx.x;
int i = tid + blockDim.x;//tid + offset
extern __shared__ real shared[];
shared[tid] = src[tid];
//Sequentially add in all elements beyond the block size
while (i < problem_size) {
shared[tid] = reduce_op(shared[tid], src[i]);
i += blockDim.x;
}
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
shared[tid] = reduce_op(shared[tid], shared[tid+512]);
__syncthreads();
}
}
if (block_size >= 512) {
if (tid < 256) {
shared[tid] = reduce_op(shared[tid], shared[tid+256]);
__syncthreads();
}
}
if (block_size >= 256) {
if (tid < 128) {
shared[tid] = reduce_op(shared[tid], shared[tid+128]);
__syncthreads();
}
}
if (block_size >= 128) {
if (tid < 64) {
shared[tid] = reduce_op(shared[tid], shared[tid+64]);
__syncthreads();
}
}
if (tid < 32)
reduce_warp<reduce_op>(shared, tid);
if (tid == 0) *dest = shared[0];
}
//Calculates the maximum vector magnitude found in the system
template <unsigned int block_size, ReduceFunc reduce_op, ReduceInitFunc reduce_init_op>//inline const T& min(const T& a, const T& b)
__global__ void reduce_initial(real* d_partial_result, real* d_vec_x, real* d_vec_y = NULL, real* d_vec_z = NULL)
{
extern __shared__ real vec_shared[];
const int tid = threadIdx.x + threadIdx.y*blockDim.x;//index inside the shared mem array
const int tx = threadIdx.x + blockIdx.x*blockDim.x + d_nx_min;
const int ty = threadIdx.y + blockIdx.y*blockDim.y + d_ny_min;
const int tz = blockIdx.z*COL_ELEMS_PER_THREAD + d_nz_min;
const int base_idx = tx + ty*d_mx + tz*d_mx*d_my;
assert(tx >= d_nx_min);
assert(tx < d_nx_max);
assert(ty >= d_ny_min);
assert(ty < d_ny_max);
assert(tz >= d_nz_min);
assert(tz < d_nz_max);
real vec;
const bool REDUCE_VEC = (d_vec_y != NULL);
if (REDUCE_VEC)
vec_shared[tid] = reduce_init_op(d_vec_x[base_idx],
d_vec_y[base_idx],
d_vec_z[base_idx]); //init first value
else
vec_shared[tid] = reduce_init_op(d_vec_x[base_idx], 0, 0);
for (int i=1; i < COL_ELEMS_PER_THREAD && tz+i < d_nz_max; i++)
{
const int grid_idx = base_idx + i*d_mx*d_my;
assert(tz+i < d_nz_max);
if (REDUCE_VEC)
vec = reduce_init_op(d_vec_x[grid_idx],
d_vec_y[grid_idx],
d_vec_z[grid_idx]); //init first value
else
vec = reduce_init_op(d_vec_x[grid_idx], 0, 0);
vec_shared[tid] = reduce_op(vec_shared[tid], vec);
}
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+512]);
__syncthreads();
}
}
if (block_size >= 512) {
if (tid < 256) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+256]);
__syncthreads();
}
}
if (block_size >= 256) {
if (tid < 128) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+128]);
__syncthreads();
}
}
if (block_size >= 128) {
if (tid < 64) {
vec_shared[tid] = reduce_op(vec_shared[tid], vec_shared[tid+64]);
__syncthreads();
}
}
if (tid < 32)
reduce_warp<reduce_op>(vec_shared, tid);
if (tid == 0) d_partial_result[blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y] = vec_shared[0];
}
/*
* Calculates the max vec found in the grid
* Puts the result in reduct_arr->d_vec_res (in device memory)
*/
template<ReduceFunc reduce_op, ReduceInitFunc reduce_init_op>
void reduce_cuda_generic(ReductionArray* reduct_arr, CParamConfig* cparams, real* d_vec_x, real* d_vec_y = NULL, real* d_vec_z = NULL)
{
//-------------------------------------------------
dim3 tpb, bpg;
tpb.x = COL_THREADS_X;
tpb.y = COL_THREADS_Y;
tpb.z = 1; // 2D blockdims only
const int SMEM_PER_BLOCK = tpb.x * tpb.y *tpb.z * sizeof(real);
bpg.x = ceil((real) cparams->nx / (real)COL_THREADS_X);
bpg.y = ceil((real) cparams->ny / (real)COL_THREADS_Y);
bpg.z = ceil((real) cparams->nz / (real)COL_ELEMS_PER_THREAD);
const int BLOCKS_TOTAL = bpg.x * bpg.y * bpg.z;
//------------------------------------------------
//Collectiveops works only when BLOCKS_TOTAL is divisible by the thread block size.
//This is not good and collectiveops should be rewritten to support arbitrary grid dims
assert(BLOCKS_TOTAL % (tpb.x * tpb.y * tpb.z) == 0);
switch (tpb.x*tpb.y*tpb.z)
{
case 512:
reduce_initial<512, reduce_op, reduce_init_op><<<bpg, tpb, SMEM_PER_BLOCK>>>(reduct_arr->d_partial_result, d_vec_x, d_vec_y, d_vec_z); CUDA_ERRCHK_KERNEL(); break;
case 256:
reduce_initial<256, reduce_op, reduce_init_op><<<bpg, tpb, SMEM_PER_BLOCK>>>(reduct_arr->d_partial_result, d_vec_x, d_vec_y, d_vec_z); CUDA_ERRCHK_KERNEL(); break;
case 128:
reduce_initial<128, reduce_op, reduce_init_op><<<bpg, tpb, SMEM_PER_BLOCK>>>(reduct_arr->d_partial_result, d_vec_x, d_vec_y, d_vec_z); CUDA_ERRCHK_KERNEL(); break;
default:
printf("INCORRECT THREAD SIZE!\n");
exit(EXIT_FAILURE);
}
if (BLOCKS_TOTAL >= 1024) {
reduce<1024, reduce_op><<<1, 1024, 1024*sizeof(real)>>>(reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 512) {
reduce<512, reduce_op><<<1, 512, 512*sizeof(real)>>>(reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 256) {
reduce<256, reduce_op><<<1, 256, 256*sizeof(real)>>>(reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 128) {
reduce<128, reduce_op><<<1, 128, 128*sizeof(real)>>>(reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else if (BLOCKS_TOTAL >= 16) {
reduce<16, reduce_op><<<1, 16, 16*sizeof(real)>>>(reduct_arr->d_vec_res, reduct_arr->d_partial_result, BLOCKS_TOTAL); CUDA_ERRCHK_KERNEL();
} else {
printf("INCORRECT BLOCKS_TOTAL (= %d) IN collectiveops.cu!\n", BLOCKS_TOTAL);
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
//We're done
return;
}
//template<ReductType t>
real get_reduction_cuda_generic(ReductionArray* reduct_arr, ReductType t, CParamConfig* cparams, real* d_a, real* d_b, real* d_c)
{
real res;
switch (t) {
case MAX_VEC_UU:
reduce_cuda_generic<dmax, ddist>(reduct_arr, cparams, d_a, d_b, d_c);
break;
case MIN_VEC_UU:
reduce_cuda_generic<dmin, ddist>(reduct_arr, cparams, d_a, d_b, d_c);
break;
case RMS_VEC_UU:
reduce_cuda_generic<dsum, dsqrsum>(reduct_arr, cparams, d_a, d_b, d_c);
break;
case MAX_SCAL:
reduce_cuda_generic<dmax, dscal>(reduct_arr, cparams, d_a);
break;
case MIN_SCAL:
reduce_cuda_generic<dmin, dscal>(reduct_arr, cparams, d_a);
break;
case RMS_SCAL:
reduce_cuda_generic<dsum, dsqrsum>(reduct_arr, cparams, d_a);
break;
case RMS_EXP:
reduce_cuda_generic<dsum, dexpsqrscal>(reduct_arr, cparams, d_a);
break;
default:
CRASH("Invalid type!");
}
cudaDeviceSynchronize();
CUDA_ERRCHK( cudaMemcpy(&res, (real*)reduct_arr->d_vec_res, sizeof(real), cudaMemcpyDeviceToHost) );
cudaDeviceSynchronize();
return res;
}
|
c0aba03de6c1eabd22e4157744a43675563e5c6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector> // vector
/* use this to set the block size of the kernel launches.
CUDA kernels will be launched with block size blockDimSize by blockDimSize. */
constexpr int blockDimSize = 8;
/* your job is to write convolveGPU:
convolveGPU will be called with blockSize blockDimSize x blockDimSize
and gridsize ⌈height/blockDimSize⌉x⌈width/blockDimSize⌉.
Each thread may have to compute more than one pixel. You will need to stride the computation.
Look at convolveCPU below for more info.
*/
__global__ void convolveGPU(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
/* your code here */
}
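/* Illustrative sketch only (not the required solution and not part of the original
   assignment): one possible grid-stride implementation of the convolution above. The name
   convolveGPUSketch and the choice of mapping blockIdx.x to rows (to match the grid built
   in convolveFrames below) are assumptions. */
__global__ void convolveGPUSketch(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
    const int halfKernelHeight = kernelHeight/2;
    const int halfKernelWidth = kernelWidth/2;
    /* grid-stride loops: each thread covers several pixels, so any grid size works */
    for (int i = blockIdx.x * blockDim.x + threadIdx.x + halfKernelHeight; i < height - halfKernelHeight; i += gridDim.x * blockDim.x) {
        for (int j = blockIdx.y * blockDim.y + threadIdx.y + halfKernelWidth; j < width - halfKernelWidth; j += gridDim.y * blockDim.y) {
            float dot[3] = {0.0f, 0.0f, 0.0f};
            for (int k = -halfKernelHeight; k <= halfKernelHeight; k += 1) {
                for (int l = -halfKernelWidth; l <= halfKernelWidth; l += 1) {
                    const float w = kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
                    /* interleaved 3-floats-per-pixel layout, as in convolveCPU below */
                    for (int c = 0; c < 3; c += 1) {
                        dot[c] += in[(i+k)*width*3 + (j+l)*3 + c] * w;
                    }
                }
            }
            for (int c = 0; c < 3; c += 1) {
                out[i*width*3 + j*3 + c] = dot[c];
            }
        }
    }
}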
/* A CPU example of the convolve kernel */
void convolveCPU(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
const int halfKernelHeight = kernelHeight/2;
const int halfKernelWidth = kernelWidth/2;
const int redChannel = 2;
const int greenChannel = 1;
const int blueChannel = 0;
/* point-wise loop over the image pixels */
for (int i = halfKernelHeight; i < height-halfKernelHeight; i += 1) {
for (int j = halfKernelWidth; j < width-halfKernelWidth; j += 1) {
/* compute dot product of kernel and sub-image */
float redDot = 0.0f, greenDot = 0.0f, blueDot = 0.0f;
for (int k = -halfKernelHeight; k <= halfKernelHeight; k += 1) {
for (int l = -halfKernelWidth; l <= halfKernelWidth; l += 1) {
/* add in[i+k][j+l]*kernel[k][l] to dot product for red, green, and blue */
redDot += in[(i+k)*width*3 + (j+l)*3 + redChannel] * kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
greenDot += in[(i+k)*width*3 + (j+l)*3 + greenChannel] * kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
blueDot += in[(i+k)*width*3 + (j+l)*3 + blueChannel] * kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
}
}
/* set out[i][j] to dot product */
out[i*width*3 + j*3 + redChannel] = redDot;
out[i*width*3 + j*3 + greenChannel] = greenDot;
out[i*width*3 + j*3 + blueChannel] = blueDot;
}
}
}
/* call the convolveGPU function on each frame */
float convolveFrames(std::vector<float *> const& framesIn, std::vector<float *> &framesOut, int width, int height, float const* kernel, int kernelWidth, int kernelHeight,
hipStream_t *streams, int numStreams) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 blockSize (blockDimSize, blockDimSize);
dim3 gridSize ( height/blockSize.x + (height % blockSize.x != 0),
width/blockSize.y + (width % blockSize.y != 0) );
hipEventRecord(start, 0);
for (int i = 0; i < framesIn.size(); i += 1) {
hipLaunchKernelGGL(( convolveGPU), dim3(gridSize), dim3(blockSize), 0, streams[i % numStreams], framesIn.at(i), framesOut.at(i), width, height, kernel, kernelWidth, kernelHeight);
}
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsed;
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return (elapsed / 1000.0f);
}
|
c0aba03de6c1eabd22e4157744a43675563e5c6d.cu
|
#include <vector> // vector
/* use this to set the block size of the kernel launches.
CUDA kernels will be launched with block size blockDimSize by blockDimSize. */
constexpr int blockDimSize = 8;
/* your job is to write convolveGPU:
convolveGPU will be called with blockSize blockDimSize x blockDimSize
and gridsize ⌈height/blockDimSize⌉x⌈width/blockDimSize⌉.
Each thread may have to compute more than one pixel. You will need to stride the computation.
Look at convolveCPU below for more info.
*/
__global__ void convolveGPU(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
/* your code here */
}
/* A CPU example of the convolve kernel */
void convolveCPU(float const* in, float *out, int width, int height, float const* kernel, int kernelWidth, int kernelHeight) {
const int halfKernelHeight = kernelHeight/2;
const int halfKernelWidth = kernelWidth/2;
const int redChannel = 2;
const int greenChannel = 1;
const int blueChannel = 0;
/* point-wise loop over the image pixels */
for (int i = halfKernelHeight; i < height-halfKernelHeight; i += 1) {
for (int j = halfKernelWidth; j < width-halfKernelWidth; j += 1) {
/* compute dot product of kernel and sub-image */
float redDot = 0.0f, greenDot = 0.0f, blueDot = 0.0f;
for (int k = -halfKernelHeight; k <= halfKernelHeight; k += 1) {
for (int l = -halfKernelWidth; l <= halfKernelWidth; l += 1) {
/* add in[i+k][j+l]*kernel[k][l] to dot product for red, green, and blue */
redDot += in[(i+k)*width*3 + (j+l)*3 + redChannel] * kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
greenDot += in[(i+k)*width*3 + (j+l)*3 + greenChannel] * kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
blueDot += in[(i+k)*width*3 + (j+l)*3 + blueChannel] * kernel[(k+halfKernelHeight)*kernelWidth + (l+halfKernelWidth)];
}
}
/* set out[i][j] to dot product */
out[i*width*3 + j*3 + redChannel] = redDot;
out[i*width*3 + j*3 + greenChannel] = greenDot;
out[i*width*3 + j*3 + blueChannel] = blueDot;
}
}
}
/* call the convolveGPU function on each frame */
float convolveFrames(std::vector<float *> const& framesIn, std::vector<float *> &framesOut, int width, int height, float const* kernel, int kernelWidth, int kernelHeight,
cudaStream_t *streams, int numStreams) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 blockSize (blockDimSize, blockDimSize);
dim3 gridSize ( height/blockSize.x + (height % blockSize.x != 0),
width/blockSize.y + (width % blockSize.y != 0) );
cudaEventRecord(start, 0);
for (int i = 0; i < framesIn.size(); i += 1) {
convolveGPU<<<gridSize, blockSize, 0, streams[i % numStreams]>>>(framesIn.at(i), framesOut.at(i), width, height, kernel, kernelWidth, kernelHeight);
}
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return (elapsed / 1000.0f);
}
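/* Hypothetical usage sketch (not in the original file): allocates a few device frames and
   streams, uploads a 3x3 box filter, and times convolveFrames. The helper name
   convolveFramesExample and all sizes below are illustrative assumptions. */
static float convolveFramesExample(int width, int height, int numFrames, int numStreams) {
    std::vector<float *> framesIn(numFrames), framesOut(numFrames);
    for (int i = 0; i < numFrames; i += 1) {
        cudaMalloc(&framesIn[i], width * height * 3 * sizeof(float));
        cudaMalloc(&framesOut[i], width * height * 3 * sizeof(float));
    }
    /* the filter must live in device memory because convolveGPU reads it on the GPU */
    const float h_kernel[9] = {1.f/9, 1.f/9, 1.f/9, 1.f/9, 1.f/9, 1.f/9, 1.f/9, 1.f/9, 1.f/9};
    float *d_kernel = NULL;
    cudaMalloc(&d_kernel, sizeof(h_kernel));
    cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice);
    std::vector<cudaStream_t> streams(numStreams);
    for (int i = 0; i < numStreams; i += 1) {
        cudaStreamCreate(&streams[i]);
    }
    float seconds = convolveFrames(framesIn, framesOut, width, height, d_kernel, 3, 3, streams.data(), numStreams);
    for (int i = 0; i < numStreams; i += 1) {
        cudaStreamDestroy(streams[i]);
    }
    cudaFree(d_kernel);
    for (int i = 0; i < numFrames; i += 1) {
        cudaFree(framesIn[i]);
        cudaFree(framesOut[i]);
    }
    return seconds;
}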
|
5593324d2fcd701339c9e2d61e9f3a3d911c16b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <chrono>
#include <stdlib.h>
#include <ctime>
#include <cmath>
#include <limits>
__global__ void sum_vectors_if(double *a, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
if ((int(a[idx]) + idx) % 3 == 0) {
a[idx] = a[idx / 3] * (idx + 2);
} else {
if ((int(a[idx]) + idx) % 3 == 1) {
a[idx] = a[idx / 3] * (idx + 1);
} else {
a[idx] = a[idx / 3] * (idx - 2);
}
}
}
}
__global__ void sum_vectors(double *a, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
a[idx] += idx % 3;
}
}
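/* The two kernels above appear intended as a comparison pair: sum_vectors_if takes
   data-dependent branches and gathers from a[idx / 3], while sum_vectors is branch-free
   with unit-stride accesses, so the event timings printed below roughly contrast the cost
   of divergence and irregular reads against the simple case. */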
int main(int argc, char **argv){
int n = (int)strtol(argv[1], NULL, 10);
size_t bytes = n * sizeof(double);
double *h_a;
h_a = (double *) malloc(bytes);
srand(3333);
for (int i = 0; i < n; i++){
int num = rand();
h_a[i] = num - num % 3;
}
double *d_a;
hipMalloc(&d_a, bytes);
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024;
gridSize = (n - 1) / 1024 + 1;
hipEvent_t start, stop;
// sum_vectors_if
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( sum_vectors_if), dim3(gridSize), dim3(blockSize), 0, 0, d_a, n);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float sum_vectors_if = 0;
hipEventElapsedTime(&sum_vectors_if, start, stop);
std::cout << "Gpu time: " << sum_vectors_if << " sum_vectors_if" << std::endl;
// sum_vectors
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( sum_vectors), dim3(gridSize), dim3(blockSize), 0, 0, d_a, n);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float sum_vectors = 0;
hipEventElapsedTime(&sum_vectors, start, stop);
std::cout << "Gpu time: " << sum_vectors << " sum_vectors" << std::endl;
hipFree(d_a);
free(h_a);
return 0;
}
|
5593324d2fcd701339c9e2d61e9f3a3d911c16b3.cu
|
#include <iostream>
#include <cuda.h>
#include <chrono>
#include <stdlib.h>
#include <ctime>
#include <cmath>
#include <limits>
__global__ void sum_vectors_if(double *a, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
if ((int(a[idx]) + idx) % 3 == 0) {
a[idx] = a[idx / 3] * (idx + 2);
} else {
if ((int(a[idx]) + idx) % 3 == 1) {
a[idx] = a[idx / 3] * (idx + 1);
} else {
a[idx] = a[idx / 3] * (idx - 2);
}
}
}
}
__global__ void sum_vectors(double *a, int size){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
a[idx] += idx % 3;
}
}
int main(int argc, char **argv){
int n = (int)strtol(argv[1], NULL, 10);
size_t bytes = n * sizeof(double);
double *h_a;
h_a = (double *) malloc(bytes);
srand(3333);
for (int i = 0; i < n; i++){
int num = rand();
h_a[i] = num - num % 3;
}
double *d_a;
cudaMalloc(&d_a, bytes);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 1024;
gridSize = (n - 1) / 1024 + 1;
cudaEvent_t start, stop;
// sum_vectors_if
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
sum_vectors_if<<<gridSize, blockSize>>>(d_a, n);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float sum_vectors_if = 0;
cudaEventElapsedTime(&sum_vectors_if, start, stop);
std::cout << "Gpu time: " << sum_vectors_if << " sum_vectors_if" << std::endl;
// sum_vectors
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
sum_vectors<<<gridSize, blockSize>>>(d_a, n);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float sum_vectors = 0;
cudaEventElapsedTime(&sum_vectors, start, stop);
std::cout << "Gpu time: " << sum_vectors << " sum_vectors" << std::endl;
cudaFree(d_a);
free(h_a);
return 0;
}
|
3ddd891324fe684bb9d92cd197a141980b671ef2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__floatToInt.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(float));
int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE*sizeof(int));
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(__floatToInt, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(__floatToInt, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(__floatToInt, dim3(gridBlock), dim3(threadBlock), 0, 0, A, B, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
3ddd891324fe684bb9d92cd197a141980b671ef2.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__floatToInt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(float));
int *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE*sizeof(int));
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
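/* cudaFree(0) forces context creation; the first launch and the 10-iteration loop are
   untimed warm-up, and only the following 1000 launches are timed with std::chrono. */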
cudaFree(0);
__floatToInt<<<gridBlock,threadBlock>>>(A,B,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__floatToInt<<<gridBlock,threadBlock>>>(A,B,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__floatToInt<<<gridBlock,threadBlock>>>(A,B,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
558f50539bc17ee9f94d3cba2764ce2d6c10fbe5.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
#include <vulkan/vulkan.h>
#include <algorithm>
#include <array>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <iostream>
#include <set>
#include <stdexcept>
#include <thread>
#include <vector>
#ifdef _WIN64
#include <aclapi.h>
#include <dxgi1_2.h>
#include <vulkan/vulkan_win32.h>
#include <windows.h>
#include <VersionHelpers.h>
#define _USE_MATH_DEFINES
#endif
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "linmath.h"
#define WIDTH 800
#define HEIGHT 600
#define VULKAN_VALIDATION 0
const std::vector<const char*> validationLayers = {
"VK_LAYER_LUNARG_standard_validation"};
#if VULKAN_VALIDATION
const bool enableValidationLayers = true;
#else
const bool enableValidationLayers = false;
#endif
struct QueueFamilyIndices {
int graphicsFamily = -1;
int presentFamily = -1;
bool isComplete() { return graphicsFamily >= 0 && presentFamily >= 0; }
};
const std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
#ifdef _WIN64
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
#else
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
#endif
};
#ifdef _WIN64
class WindowsSecurityAttributes {
protected:
SECURITY_ATTRIBUTES m_winSecurityAttributes;
PSECURITY_DESCRIPTOR m_winPSecurityDescriptor;
public:
WindowsSecurityAttributes();
SECURITY_ATTRIBUTES* operator&();
~WindowsSecurityAttributes();
};
WindowsSecurityAttributes::WindowsSecurityAttributes() {
m_winPSecurityDescriptor = (PSECURITY_DESCRIPTOR)calloc(
1, SECURITY_DESCRIPTOR_MIN_LENGTH + 2 * sizeof(void**));
// CHECK_NEQ(m_winPSecurityDescriptor, (PSECURITY_DESCRIPTOR)NULL);
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
InitializeSecurityDescriptor(m_winPSecurityDescriptor,
SECURITY_DESCRIPTOR_REVISION);
SID_IDENTIFIER_AUTHORITY sidIdentifierAuthority =
SECURITY_WORLD_SID_AUTHORITY;
AllocateAndInitializeSid(&sidIdentifierAuthority, 1, SECURITY_WORLD_RID, 0, 0,
0, 0, 0, 0, 0, ppSID);
EXPLICIT_ACCESS explicitAccess;
ZeroMemory(&explicitAccess, sizeof(EXPLICIT_ACCESS));
explicitAccess.grfAccessPermissions =
STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL;
explicitAccess.grfAccessMode = SET_ACCESS;
explicitAccess.grfInheritance = INHERIT_ONLY;
explicitAccess.Trustee.TrusteeForm = TRUSTEE_IS_SID;
explicitAccess.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
explicitAccess.Trustee.ptstrName = (LPTSTR)*ppSID;
SetEntriesInAcl(1, &explicitAccess, NULL, ppACL);
SetSecurityDescriptorDacl(m_winPSecurityDescriptor, TRUE, *ppACL, FALSE);
m_winSecurityAttributes.nLength = sizeof(m_winSecurityAttributes);
m_winSecurityAttributes.lpSecurityDescriptor = m_winPSecurityDescriptor;
m_winSecurityAttributes.bInheritHandle = TRUE;
}
SECURITY_ATTRIBUTES* WindowsSecurityAttributes::operator&() {
return &m_winSecurityAttributes;
}
WindowsSecurityAttributes::~WindowsSecurityAttributes() {
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
if (*ppSID) {
FreeSid(*ppSID);
}
if (*ppACL) {
LocalFree(*ppACL);
}
free(m_winPSecurityDescriptor);
}
#endif
struct UniformBufferObject {
mat4x4 model;
mat4x4 view;
mat4x4 proj;
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
struct Vertex {
vec4 pos;
vec3 color;
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
return bindingDescription;
}
static std::array<VkVertexInputAttributeDescription, 2>
getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 2> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
return attributeDescriptions;
}
};
size_t mesh_width = 0, mesh_height = 0;
std::string execution_path;
__global__ void sinewave_gen_kernel(Vertex* vertices, unsigned int width,
unsigned int height, float time) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f;
if (y < height && x < width) {
// write output vertex
vertices[y * width + x].pos[0] = u;
vertices[y * width + x].pos[1] = w;
vertices[y * width + x].pos[2] = v;
vertices[y * width + x].pos[3] = 1.0f;
vertices[y * width + x].color[0] = 1.0f;
vertices[y * width + x].color[1] = 0.0f;
vertices[y * width + x].color[2] = 0.0f;
}
}
class vulkanCudaApp {
public:
void run() {
initWindow();
initVulkan();
initCuda();
mainLoop();
cleanup();
}
private:
GLFWwindow* window;
VkInstance instance;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
uint8_t vkDeviceUUID[VK_UUID_SIZE];
VkDevice device;
VkQueue graphicsQueue;
VkQueue presentQueue;
VkSurfaceKHR surface;
VkSwapchainKHR swapChain;
std::vector<VkImage> swapChainImages;
VkFormat swapChainImageFormat;
VkExtent2D swapChainExtent;
std::vector<VkImageView> swapChainImageViews;
VkDescriptorSetLayout descriptorSetLayout;
VkDescriptorPool descriptorPool;
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkRenderPass renderPass;
VkPipeline graphicsPipeline;
std::vector<VkFramebuffer> swapChainFramebuffers;
VkCommandPool commandPool;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer uniformBuffer;
VkDeviceMemory uniformBufferMemory;
std::vector<VkCommandBuffer> commandBuffers;
VkSemaphore imageAvailableSemaphore;
VkSemaphore renderFinishedSemaphore;
VkSemaphore cudaUpdateVkVertexBufSemaphore;
VkSemaphore vkUpdateCudaVertexBufSemaphore;
size_t vertexBufSize = 0;
bool startSubmit = 0;
double AnimTime = 1.0f;
VkDebugReportCallbackEXT callback;
#ifdef _WIN64
PFN_vkGetMemoryWin32HandleKHR fpGetMemoryWin32HandleKHR;
PFN_vkGetSemaphoreWin32HandleKHR fpGetSemaphoreWin32HandleKHR;
#else
PFN_vkGetMemoryFdKHR fpGetMemoryFdKHR;
PFN_vkGetSemaphoreFdKHR fpGetSemaphoreFdKHR;
#endif
PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2;
// CUDA stuff
cudaExternalMemory_t cudaExtMemVertexBuffer;
cudaExternalSemaphore_t cudaExtCudaUpdateVkVertexBufSemaphore;
cudaExternalSemaphore_t cudaExtVkUpdateCudaVertexBufSemaphore;
void* cudaDevVertptr = NULL;
hipStream_t streamToRun;
bool checkValidationLayerSupport() {
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
std::vector<VkLayerProperties> availableLayers(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
for (const char* layerName : validationLayers) {
bool layerFound = false;
for (const auto& layerProperties : availableLayers) {
if (strcmp(layerName, layerProperties.layerName) == 0) {
layerFound = true;
break;
}
}
if (!layerFound) {
return false;
}
}
return true;
}
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugCallback(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType,
uint64_t obj, size_t location, int32_t code,
const char* layerPrefix, const char* msg, void* userData) {
std::cerr << "validation layer: " << msg << std::endl;
return VK_FALSE;
}
VkResult CreateDebugReportCallbackEXT(
VkInstance instance,
const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugReportCallbackEXT* pCallback) {
auto func = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(
instance, "vkCreateDebugReportCallbackEXT");
if (func != nullptr) {
return func(instance, pCreateInfo, pAllocator, pCallback);
} else {
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
}
void DestroyDebugReportCallbackEXT(VkInstance instance,
VkDebugReportCallbackEXT callback,
const VkAllocationCallbacks* pAllocator) {
auto func = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(
instance, "vkDestroyDebugReportCallbackEXT");
if (func != nullptr) {
func(instance, callback, pAllocator);
}
}
void setupDebugCallback() {
if (!enableValidationLayers) return;
VkDebugReportCallbackCreateInfoEXT createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
createInfo.flags =
VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
createInfo.pfnCallback = debugCallback;
if (CreateDebugReportCallbackEXT(instance, &createInfo, nullptr,
&callback) != VK_SUCCESS) {
throw std::runtime_error("failed to set up debug callback!");
}
}
void initWindow() {
glfwInit();
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan-CUDA Interop Sinewave",
nullptr, nullptr);
}
void createInstance() {
if (enableValidationLayers && !checkValidationLayerSupport()) {
throw std::runtime_error(
"validation layers requested, but not available!");
}
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "Vulkan CUDA Sinewave";
appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.pEngineName = "No Engine";
appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.apiVersion = VK_API_VERSION_1_0;
VkInstanceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
createInfo.pApplicationInfo = &appInfo;
uint32_t glfwExtensionCount = 0;
const char** glfwExtensions;
glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
std::vector<const char*> enabledExtensionNameList;
enabledExtensionNameList.push_back(
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
enabledExtensionNameList.push_back(
VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME);
enabledExtensionNameList.push_back(
VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
for (int i = 0; i < glfwExtensionCount; i++) {
enabledExtensionNameList.push_back(glfwExtensions[i]);
}
if (enableValidationLayers) {
enabledExtensionNameList.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
} else {
createInfo.enabledLayerCount = 0;
}
createInfo.enabledExtensionCount = enabledExtensionNameList.size();
createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
throw std::runtime_error("failed to create instance!");
} else {
std::cout << "Instance created successfully!!\n";
}
fpGetPhysicalDeviceProperties2 =
(PFN_vkGetPhysicalDeviceProperties2)vkGetInstanceProcAddr(
instance, "vkGetPhysicalDeviceProperties2");
if (fpGetPhysicalDeviceProperties2 == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetPhysicalDeviceProperties2KHR\" not "
"found.\n");
}
#ifdef _WIN64
fpGetMemoryWin32HandleKHR =
(PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryWin32HandleKHR");
if (fpGetMemoryWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryFdKHR");
if (fpGetMemoryFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryFdKHR\" not found.\n");
}
#endif
}
void initVulkan() {
createInstance();
setupDebugCallback();
createSurface();
pickPhysicalDevice();
createLogicalDevice();
getKhrExtensionsFn();
createSwapChain();
createImageViews();
createRenderPass();
createDescriptorSetLayout();
createGraphicsPipeline();
createFramebuffers();
createCommandPool();
createVertexBuffer();
createUniformBuffer();
createDescriptorPool();
createDescriptorSet();
createCommandBuffers();
createSyncObjects();
createSyncObjectsExt();
}
void initCuda() {
setCudaVkDevice();
cudaVkImportVertexMem();
cudaInitVertexMem();
cudaVkImportSemaphore();
}
void createSurface() {
if (glfwCreateWindowSurface(instance, window, nullptr, &surface) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create window surface!");
}
}
void pickPhysicalDevice() {
uint32_t deviceCount = 0;
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
if (deviceCount == 0) {
throw std::runtime_error("failed to find GPUs with Vulkan support!");
}
std::vector<VkPhysicalDevice> devices(deviceCount);
vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
for (const auto& device : devices) {
if (isDeviceSuitable(device)) {
physicalDevice = device;
break;
}
}
if (physicalDevice == VK_NULL_HANDLE) {
throw std::runtime_error("failed to find a suitable GPU!");
}
std::cout << "Selected physical device = " << physicalDevice << std::endl;
VkPhysicalDeviceIDProperties vkPhysicalDeviceIDProperties = {};
vkPhysicalDeviceIDProperties.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
vkPhysicalDeviceIDProperties.pNext = NULL;
VkPhysicalDeviceProperties2 vkPhysicalDeviceProperties2 = {};
vkPhysicalDeviceProperties2.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
vkPhysicalDeviceProperties2.pNext = &vkPhysicalDeviceIDProperties;
fpGetPhysicalDeviceProperties2(physicalDevice,
&vkPhysicalDeviceProperties2);
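    // Save the Vulkan physical device UUID so setCudaVkDevice() can pick the matching CUDA device.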
memcpy(vkDeviceUUID, vkPhysicalDeviceIDProperties.deviceUUID,
sizeof(vkDeviceUUID));
}
int setCudaVkDevice() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the GPU which is selected by Vulkan
while (current_device < device_count) {
hipGetDeviceProperties(&deviceProp, current_device);
if ((deviceProp.computeMode != hipComputeModeProhibited)) {
// Compare the cuda device UUID with vulkan UUID
int ret = memcmp(&deviceProp.uuid, &vkDeviceUUID, VK_UUID_SIZE);
if (ret == 0) {
checkCudaErrors(hipSetDevice(current_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, current_device));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, deviceProp.name, deviceProp.major,
deviceProp.minor);
return current_device;
}
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No Vulkan-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
bool isDeviceSuitable(VkPhysicalDevice device) {
QueueFamilyIndices indices = findQueueFamilies(device);
bool extensionsSupported = checkDeviceExtensionSupport(device);
bool swapChainAdequate = false;
if (extensionsSupported) {
SwapChainSupportDetails swapChainSupport = querySwapChainSupport(device);
swapChainAdequate = !swapChainSupport.formats.empty() &&
!swapChainSupport.presentModes.empty();
}
return indices.isComplete() && extensionsSupported && swapChainAdequate;
}
bool checkDeviceExtensionSupport(VkPhysicalDevice device) {
uint32_t extensionCount;
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
nullptr);
std::vector<VkExtensionProperties> availableExtensions(extensionCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
availableExtensions.data());
std::set<std::string> requiredExtensions(deviceExtensions.begin(),
deviceExtensions.end());
for (const auto& extension : availableExtensions) {
requiredExtensions.erase(extension.extensionName);
}
return requiredExtensions.empty();
}
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device) {
QueueFamilyIndices indices;
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
queueFamilies.data());
int i = 0;
for (const auto& queueFamily : queueFamilies) {
if (queueFamily.queueCount > 0 &&
queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
indices.graphicsFamily = i;
}
VkBool32 presentSupport = false;
vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (queueFamily.queueCount > 0 && presentSupport) {
indices.presentFamily = i;
}
if (indices.isComplete()) {
break;
}
i++;
}
return indices;
}
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device) {
SwapChainSupportDetails details;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface,
&details.capabilities);
uint32_t formatCount;
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
nullptr);
if (formatCount != 0) {
details.formats.resize(formatCount);
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
details.formats.data());
}
uint32_t presentModeCount;
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface,
&presentModeCount, nullptr);
if (presentModeCount != 0) {
details.presentModes.resize(presentModeCount);
vkGetPhysicalDeviceSurfacePresentModesKHR(
device, surface, &presentModeCount, details.presentModes.data());
}
return details;
}
VkSurfaceFormatKHR chooseSwapSurfaceFormat(
const std::vector<VkSurfaceFormatKHR>& availableFormats) {
if (availableFormats.size() == 1 &&
availableFormats[0].format == VK_FORMAT_UNDEFINED) {
return {VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
}
for (const auto& availableFormat : availableFormats) {
if (availableFormat.format == VK_FORMAT_B8G8R8A8_UNORM &&
availableFormat.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
return availableFormat;
}
}
return availableFormats[0];
}
VkPresentModeKHR chooseSwapPresentMode(
const std::vector<VkPresentModeKHR> availablePresentModes) {
VkPresentModeKHR bestMode = VK_PRESENT_MODE_FIFO_KHR;
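    // Prefer MAILBOX (low-latency, no tearing); otherwise fall back to IMMEDIATE, and
    // finally to FIFO, which the Vulkan spec guarantees to be supported.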
for (const auto& availablePresentMode : availablePresentModes) {
if (availablePresentMode == VK_PRESENT_MODE_MAILBOX_KHR) {
return availablePresentMode;
} else if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
bestMode = availablePresentMode;
}
}
return bestMode;
}
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) {
if (capabilities.currentExtent.width !=
std::numeric_limits<uint32_t>::max()) {
return capabilities.currentExtent;
} else {
VkExtent2D actualExtent = {WIDTH, HEIGHT};
      actualExtent.width = std::max(
          capabilities.minImageExtent.width,
          std::min(capabilities.maxImageExtent.width, actualExtent.width));
      actualExtent.height = std::max(
          capabilities.minImageExtent.height,
          std::min(capabilities.maxImageExtent.height, actualExtent.height));
return actualExtent;
}
}
void createLogicalDevice() {
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
std::set<int> uniqueQueueFamilies = {indices.graphicsFamily,
indices.presentFamily};
float queuePriority = 1.0f;
for (int queueFamily : uniqueQueueFamilies) {
VkDeviceQueueCreateInfo queueCreateInfo = {};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = queueFamily;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &queuePriority;
queueCreateInfos.push_back(queueCreateInfo);
}
VkPhysicalDeviceFeatures deviceFeatures = {};
VkDeviceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
createInfo.pQueueCreateInfos = queueCreateInfos.data();
createInfo.queueCreateInfoCount = queueCreateInfos.size();
createInfo.pEnabledFeatures = &deviceFeatures;
std::vector<const char*> enabledExtensionNameList;
for (int i = 0; i < deviceExtensions.size(); i++) {
enabledExtensionNameList.push_back(deviceExtensions[i]);
}
if (enableValidationLayers) {
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
} else {
createInfo.enabledLayerCount = 0;
}
createInfo.enabledExtensionCount =
static_cast<uint32_t>(enabledExtensionNameList.size());
createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create logical device!");
}
vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
}
void createSwapChain() {
SwapChainSupportDetails swapChainSupport =
querySwapChainSupport(physicalDevice);
VkSurfaceFormatKHR surfaceFormat =
chooseSwapSurfaceFormat(swapChainSupport.formats);
VkPresentModeKHR presentMode =
chooseSwapPresentMode(swapChainSupport.presentModes);
VkExtent2D extent = chooseSwapExtent(swapChainSupport.capabilities);
uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1;
if (swapChainSupport.capabilities.maxImageCount > 0 &&
imageCount > swapChainSupport.capabilities.maxImageCount) {
imageCount = swapChainSupport.capabilities.maxImageCount;
}
VkSwapchainCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
createInfo.surface = surface;
createInfo.minImageCount = imageCount;
createInfo.imageFormat = surfaceFormat.format;
createInfo.imageColorSpace = surfaceFormat.colorSpace;
createInfo.imageExtent = extent;
createInfo.imageArrayLayers = 1;
createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
uint32_t queueFamilyIndices[] = {(uint32_t)indices.graphicsFamily,
(uint32_t)indices.presentFamily};
if (indices.graphicsFamily != indices.presentFamily) {
createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
createInfo.queueFamilyIndexCount = 2;
createInfo.pQueueFamilyIndices = queueFamilyIndices;
} else {
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.queueFamilyIndexCount = 0; // Optional
createInfo.pQueueFamilyIndices = nullptr; // Optional
}
createInfo.preTransform = swapChainSupport.capabilities.currentTransform;
createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
createInfo.presentMode = presentMode;
createInfo.clipped = VK_TRUE;
createInfo.oldSwapchain = VK_NULL_HANDLE;
if (vkCreateSwapchainKHR(device, &createInfo, nullptr, &swapChain) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create swap chain!");
} else {
std::cout << "Swapchain created!!\n";
}
vkGetSwapchainImagesKHR(device, swapChain, &imageCount, nullptr);
swapChainImages.resize(imageCount);
vkGetSwapchainImagesKHR(device, swapChain, &imageCount,
swapChainImages.data());
swapChainImageFormat = surfaceFormat.format;
swapChainExtent = extent;
}
void createImageViews() {
swapChainImageViews.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
VkImageViewCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
createInfo.image = swapChainImages[i];
createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
createInfo.format = swapChainImageFormat;
createInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
createInfo.subresourceRange.baseMipLevel = 0;
createInfo.subresourceRange.levelCount = 1;
createInfo.subresourceRange.baseArrayLayer = 0;
createInfo.subresourceRange.layerCount = 1;
if (vkCreateImageView(device, &createInfo, nullptr,
&swapChainImageViews[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create image views!");
}
}
}
void createDescriptorSetLayout() {
VkDescriptorSetLayoutBinding uboLayoutBinding = {};
uboLayoutBinding.binding = 0;
uboLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
uboLayoutBinding.descriptorCount = 1;
uboLayoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
uboLayoutBinding.pImmutableSamplers = nullptr; // Optional
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = 1;
layoutInfo.pBindings = &uboLayoutBinding;
if (vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr,
&descriptorSetLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor set layout!");
}
}
void createGraphicsPipeline() {
auto vertShaderCode = readFile("shader_sine.vert");
auto fragShaderCode = readFile("shader_sine.frag");
VkShaderModule vertShaderModule;
VkShaderModule fragShaderModule;
vertShaderModule = createShaderModule(vertShaderCode);
fragShaderModule = createShaderModule(fragShaderCode);
VkPipelineShaderStageCreateInfo vertShaderStageInfo = {};
vertShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertShaderStageInfo.module = vertShaderModule;
vertShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo fragShaderStageInfo = {};
fragShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragShaderStageInfo.module = fragShaderModule;
fragShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo,
fragShaderStageInfo};
VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
vertexInputInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
auto bindingDescription = Vertex::getBindingDescription();
auto attributeDescriptions = Vertex::getAttributeDescriptions();
vertexInputInfo.vertexBindingDescriptionCount = 1;
vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
vertexInputInfo.vertexAttributeDescriptionCount =
static_cast<uint32_t>(attributeDescriptions.size());
vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
VkPipelineInputAssemblyStateCreateInfo inputAssembly = {};
inputAssembly.sType =
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
inputAssembly.primitiveRestartEnable = VK_FALSE;
VkViewport viewport = {};
viewport.x = 0.0f;
viewport.y = 0.0f;
viewport.width = (float)swapChainExtent.width;
viewport.height = (float)swapChainExtent.height;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
VkRect2D scissor = {};
scissor.offset = {0, 0};
scissor.extent = swapChainExtent;
VkPipelineViewportStateCreateInfo viewportState = {};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.pViewports = &viewport;
viewportState.scissorCount = 1;
viewportState.pScissors = &scissor;
VkPipelineRasterizationStateCreateInfo rasterizer = {};
rasterizer.sType =
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterizer.depthBiasEnable = VK_FALSE;
rasterizer.depthBiasConstantFactor = 0.0f; // Optional
rasterizer.depthBiasClamp = 0.0f; // Optional
rasterizer.depthBiasSlopeFactor = 0.0f; // Optional
VkPipelineMultisampleStateCreateInfo multisampling = {};
multisampling.sType =
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisampling.minSampleShading = 1.0f; // Optional
multisampling.pSampleMask = nullptr; // Optional
multisampling.alphaToCoverageEnable = VK_FALSE; // Optional
multisampling.alphaToOneEnable = VK_FALSE; // Optional
VkPipelineColorBlendAttachmentState colorBlendAttachment = {};
colorBlendAttachment.colorWriteMask =
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
colorBlendAttachment.blendEnable = VK_FALSE;
colorBlendAttachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE; // Optional
colorBlendAttachment.dstColorBlendFactor =
VK_BLEND_FACTOR_ZERO; // Optional
colorBlendAttachment.colorBlendOp = VK_BLEND_OP_ADD; // Optional
colorBlendAttachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; // Optional
colorBlendAttachment.dstAlphaBlendFactor =
VK_BLEND_FACTOR_ZERO; // Optional
colorBlendAttachment.alphaBlendOp = VK_BLEND_OP_ADD; // Optional
VkPipelineColorBlendStateCreateInfo colorBlending = {};
colorBlending.sType =
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY; // Optional
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &colorBlendAttachment;
colorBlending.blendConstants[0] = 0.0f; // Optional
colorBlending.blendConstants[1] = 0.0f; // Optional
colorBlending.blendConstants[2] = 0.0f; // Optional
colorBlending.blendConstants[3] = 0.0f; // Optional
#if 0
VkDynamicState dynamicStates[] = {
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_LINE_WIDTH
};
VkPipelineDynamicStateCreateInfo dynamicState = {};
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.dynamicStateCount = 2;
dynamicState.pDynamicStates = dynamicStates;
#endif
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = 1; // Optional
pipelineLayoutInfo.pSetLayouts = &descriptorSetLayout; // Optional
pipelineLayoutInfo.pushConstantRangeCount = 0; // Optional
pipelineLayoutInfo.pPushConstantRanges = nullptr; // Optional
if (vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr,
&pipelineLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create pipeline layout!");
}
VkGraphicsPipelineCreateInfo pipelineInfo = {};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = 2;
pipelineInfo.pStages = shaderStages;
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pDepthStencilState = nullptr; // Optional
pipelineInfo.pColorBlendState = &colorBlending;
pipelineInfo.pDynamicState = nullptr; // Optional
pipelineInfo.layout = pipelineLayout;
pipelineInfo.renderPass = renderPass;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = VK_NULL_HANDLE; // Optional
pipelineInfo.basePipelineIndex = -1; // Optional
if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipelineInfo,
nullptr, &graphicsPipeline) != VK_SUCCESS) {
throw std::runtime_error("failed to create graphics pipeline!");
} else {
std::cout << "Pipeline created successfully!!\n";
}
vkDestroyShaderModule(device, fragShaderModule, nullptr);
vkDestroyShaderModule(device, vertShaderModule, nullptr);
}
void createRenderPass() {
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = swapChainImageFormat;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference colorAttachmentRef = {};
colorAttachmentRef.attachment = 0;
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentRef;
VkRenderPassCreateInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &colorAttachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
VkSubpassDependency dependency = {};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstSubpass = 0;
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.srcAccessMask = 0;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &dependency;
if (vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create render pass!");
}
}
void createFramebuffers() {
swapChainFramebuffers.resize(swapChainImageViews.size());
for (size_t i = 0; i < swapChainImageViews.size(); i++) {
VkImageView attachments[] = {swapChainImageViews[i]};
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.renderPass = renderPass;
framebufferInfo.attachmentCount = 1;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = swapChainExtent.width;
framebufferInfo.height = swapChainExtent.height;
framebufferInfo.layers = 1;
if (vkCreateFramebuffer(device, &framebufferInfo, nullptr,
&swapChainFramebuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create framebuffer!");
}
}
}
void createCommandPool() {
QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
poolInfo.flags = 0; // Optional
if (vkCreateCommandPool(device, &poolInfo, nullptr, &commandPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create command pool!");
}
}
void createBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties, VkBuffer& buffer,
VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
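  /* createBufferExtMem mirrors createBuffer but chains VkExportMemoryAllocateInfoKHR (plus
     the Win32 handle info on Windows) into the allocation, so the VkDeviceMemory backing the
     vertex buffer can later be exported and imported into CUDA as external memory. */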
void createBufferExtMem(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties,
VkExternalMemoryHandleTypeFlagsKHR extMemHandleType,
VkBuffer& buffer, VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportMemoryWin32HandleInfoKHR vulkanExportMemoryWin32HandleInfoKHR = {};
vulkanExportMemoryWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
vulkanExportMemoryWin32HandleInfoKHR.pNext = NULL;
vulkanExportMemoryWin32HandleInfoKHR.pAttributes = &winSecurityAttributes;
vulkanExportMemoryWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportMemoryWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportMemoryAllocateInfoKHR vulkanExportMemoryAllocateInfoKHR = {};
vulkanExportMemoryAllocateInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
#ifdef _WIN64
vulkanExportMemoryAllocateInfoKHR.pNext =
extMemHandleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR
? &vulkanExportMemoryWin32HandleInfoKHR
: NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes = extMemHandleType;
#else
vulkanExportMemoryAllocateInfoKHR.pNext = NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.pNext = &vulkanExportMemoryAllocateInfoKHR;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate external buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
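  /* createVertexBuffer allocates one Vertex per point of a (width/2) x (height/2) mesh and
     selects the external-memory handle type per platform: opaque Win32 handles on Windows 8
     and newer, KMT handles on older Windows, and opaque file descriptors elsewhere. */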
void createVertexBuffer() {
mesh_width = swapChainExtent.width / 2;
mesh_height = swapChainExtent.height / 2;
vertexBufSize = mesh_height * mesh_width;
VkDeviceSize bufferSize = sizeof(Vertex) * vertexBufSize;
#ifdef _WIN64
if (IsWindows8OrGreater()) {
createBufferExtMem(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
vertexBuffer, vertexBufferMemory);
} else {
createBufferExtMem(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
vertexBuffer, vertexBufferMemory);
}
#else
createBufferExtMem(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
vertexBuffer, vertexBufferMemory);
#endif
}
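  /* cudaInitVertexMem runs sinewave_gen_kernel over the imported vertex buffer on its own
     stream. The grid is mesh_width/16 x mesh_height/16 using integer division, so if either
     dimension is not a multiple of 16 the trailing rows/columns are left unwritten. */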
void cudaInitVertexMem() {
checkCudaErrors(hipStreamCreate(&streamToRun));
dim3 block(16, 16, 1);
dim3 grid(mesh_width / 16, mesh_height / 16, 1);
Vertex* vertices = (Vertex*)cudaDevVertptr;
hipLaunchKernelGGL(( sinewave_gen_kernel), dim3(grid), dim3(block), 0, streamToRun, vertices, mesh_width,
mesh_height, 1.0);
checkCudaErrors(hipStreamSynchronize(streamToRun));
}
void createUniformBuffer() {
VkDeviceSize bufferSize = sizeof(UniformBufferObject);
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
uniformBuffer, uniformBufferMemory);
}
uint32_t findMemoryType(uint32_t typeFilter,
VkMemoryPropertyFlags properties) {
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
if (typeFilter & (1 << i) && (memProperties.memoryTypes[i].propertyFlags &
properties) == properties) {
return i;
}
}
throw std::runtime_error("failed to find suitable memory type!");
}
void getKhrExtensionsFn() {
#ifdef _WIN64
fpGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreWin32HandleKHR");
if (fpGetSemaphoreWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreFdKHR");
if (fpGetSemaphoreFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreFdKHR\" not found.\n");
}
#endif
}
void createCommandBuffers() {
commandBuffers.resize(swapChainFramebuffers.size());
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)commandBuffers.size();
if (vkAllocateCommandBuffers(device, &allocInfo, commandBuffers.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate command buffers!");
}
for (size_t i = 0; i < commandBuffers.size(); i++) {
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
beginInfo.pInheritanceInfo = nullptr; // Optional
if (vkBeginCommandBuffer(commandBuffers[i], &beginInfo) != VK_SUCCESS) {
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = swapChainFramebuffers[i];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = swapChainExtent;
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffers[i], &renderPassInfo,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS,
graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindDescriptorSets(commandBuffers[i],
VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
0, 1, &descriptorSet, 0, nullptr);
vkCmdDraw(commandBuffers[i], static_cast<uint32_t>(vertexBufSize), 1, 0,
0);
vkCmdEndRenderPass(commandBuffers[i]);
if (vkEndCommandBuffer(commandBuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to record command buffer!");
}
}
}
VkShaderModule createShaderModule(const std::vector<char>& code) {
VkShaderModuleCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.codeSize = code.size();
createInfo.pCode = reinterpret_cast<const uint32_t*>(code.data());
VkShaderModule shaderModule;
if (vkCreateShaderModule(device, &createInfo, nullptr, &shaderModule) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create shader module!");
}
return shaderModule;
}
static std::vector<char> readFile(const std::string& filename) {
char* file_path = sdkFindFilePath(filename.c_str(), execution_path.c_str());
    if (file_path == NULL) {
      throw std::runtime_error("failed to locate shader spv file!\n");
    }
std::ifstream file(file_path, std::ios::ate | std::ios::binary);
if (!file.is_open()) {
throw std::runtime_error("failed to open shader spv file!\n");
}
size_t fileSize = (size_t)file.tellg();
std::vector<char> buffer(fileSize);
file.seekg(0);
file.read(buffer.data(), fileSize);
file.close();
return buffer;
}
void mainLoop() {
updateUniformBuffer();
while (!glfwWindowShouldClose(window)) {
glfwPollEvents();
drawFrame();
}
vkDeviceWaitIdle(device);
}
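  // Computes a fixed model/view/projection transform with linmath and copies
  // it into the host-visible uniform buffer. proj[1][1] is negated because
  // Vulkan's clip space has an inverted Y axis relative to the OpenGL-style
  // convention linmath assumes. Called once, since the camera never moves.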
void updateUniformBuffer() {
UniformBufferObject ubo = {};
mat4x4_identity(ubo.model);
mat4x4 Model;
mat4x4_dup(Model, ubo.model);
mat4x4_rotate(ubo.model, Model, 1.0f, 0.0f, 1.0f, degreesToRadians(45.0f));
vec3 eye = {2.0f, 2.0f, 2.0f};
vec3 center = {0.0f, 0.0f, 0.0f};
vec3 up = {0.0f, 0.0f, 1.0f};
mat4x4_look_at(ubo.view, eye, center, up);
mat4x4_perspective(ubo.proj, degreesToRadians(45.0f),
swapChainExtent.width / (float)swapChainExtent.height,
0.1f, 10.0f);
ubo.proj[1][1] *= -1;
void* data;
vkMapMemory(device, uniformBufferMemory, 0, sizeof(ubo), 0, &data);
memcpy(data, &ubo, sizeof(ubo));
vkUnmapMemory(device, uniformBufferMemory);
}
void createDescriptorPool() {
VkDescriptorPoolSize poolSize = {};
poolSize.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSize.descriptorCount = 1;
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = 1;
poolInfo.pPoolSizes = &poolSize;
poolInfo.maxSets = 1;
if (vkCreateDescriptorPool(device, &poolInfo, nullptr, &descriptorPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor pool!");
}
}
void createDescriptorSet() {
VkDescriptorSetLayout layouts[] = {descriptorSetLayout};
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = layouts;
if (vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor set!");
}
VkDescriptorBufferInfo bufferInfo = {};
bufferInfo.buffer = uniformBuffer;
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
VkWriteDescriptorSet descriptorWrite = {};
descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrite.dstSet = descriptorSet;
descriptorWrite.dstBinding = 0;
descriptorWrite.dstArrayElement = 0;
descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrite.descriptorCount = 1;
descriptorWrite.pBufferInfo = &bufferInfo;
descriptorWrite.pImageInfo = nullptr; // Optional
descriptorWrite.pTexelBufferView = nullptr; // Optional
vkUpdateDescriptorSets(device, 1, &descriptorWrite, 0, nullptr);
}
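  // Per-frame flow: acquire a swapchain image, submit the pre-recorded command
  // buffer (the very first submit does not yet wait on CUDA), present, then
  // let CUDA regenerate the vertices for the next frame. The short sleep just
  // throttles CPU submission.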
void drawFrame() {
uint32_t imageIndex;
vkAcquireNextImageKHR(device, swapChain,
std::numeric_limits<uint64_t>::max(),
imageAvailableSemaphore, VK_NULL_HANDLE, &imageIndex);
if (!startSubmit) {
submitVulkan(imageIndex);
startSubmit = 1;
} else {
submitVulkanCuda(imageIndex);
}
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
VkSemaphore signalSemaphores[] = {renderFinishedSemaphore};
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
VkSwapchainKHR swapChains[] = {swapChain};
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &imageIndex;
presentInfo.pResults = nullptr; // Optional
vkQueuePresentKHR(presentQueue, &presentInfo);
cudaUpdateVertexBuffer();
// Added sleep of 5 millisecs so that CPU does not submit too much work to
// GPU
std::this_thread::sleep_for(std::chrono::microseconds(5000));
}
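  // First-frame submit: waits only on image acquisition and signals both the
  // render-finished semaphore and vkUpdateCudaVertexBufSemaphore, telling CUDA
  // that graphics has finished reading the vertex buffer.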
void submitVulkan(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphore,
vkUpdateCudaVertexBufSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
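  // Steady-state submit: additionally waits on cudaUpdateVkVertexBufSemaphore,
  // which CUDA signals once it has finished writing the new vertex data.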
void submitVulkanCuda(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphore,
cudaUpdateVkVertexBufSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submitInfo.waitSemaphoreCount = 2;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphore,
vkUpdateCudaVertexBufSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
void createSyncObjects() {
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&imageAvailableSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&renderFinishedSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a frame!");
}
}
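  // Creates the two exportable semaphores used for CUDA-Vulkan coordination.
  // They are created with an export-info chain so their handles (opaque Win32
  // handle or POSIX fd) can later be imported by CUDA.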
void createSyncObjectsExt() {
    VkSemaphoreCreateInfo semaphoreInfo = {};
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportSemaphoreWin32HandleInfoKHR
vulkanExportSemaphoreWin32HandleInfoKHR = {};
vulkanExportSemaphoreWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
vulkanExportSemaphoreWin32HandleInfoKHR.pNext = NULL;
vulkanExportSemaphoreWin32HandleInfoKHR.pAttributes =
&winSecurityAttributes;
vulkanExportSemaphoreWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportSemaphoreWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportSemaphoreCreateInfoKHR vulkanExportSemaphoreCreateInfo = {};
vulkanExportSemaphoreCreateInfo.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
#ifdef _WIN64
vulkanExportSemaphoreCreateInfo.pNext =
IsWindows8OrGreater() ? &vulkanExportSemaphoreWin32HandleInfoKHR : NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportSemaphoreCreateInfo.pNext = NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
semaphoreInfo.pNext = &vulkanExportSemaphoreCreateInfo;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&cudaUpdateVkVertexBufSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&vkUpdateCudaVertexBufSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a CUDA-Vulkan!");
}
}
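  // Imports the exported Vulkan vertex-buffer allocation into CUDA as external
  // memory and maps it to a device pointer (cudaDevVertptr) that the sine-wave
  // kernel writes to. The handle type is chosen per platform / Windows version.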
void cudaVkImportVertexMem() {
cudaExternalMemoryHandleDesc cudaExtMemHandleDesc;
memset(&cudaExtMemHandleDesc, 0, sizeof(cudaExtMemHandleDesc));
#ifdef _WIN64
cudaExtMemHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalMemoryHandleTypeOpaqueWin32
: cudaExternalMemoryHandleTypeOpaqueWin32Kmt;
cudaExtMemHandleDesc.handle.win32.handle = getVkMemHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT);
#else
cudaExtMemHandleDesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
cudaExtMemHandleDesc.handle.fd =
getVkMemHandle(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
#endif
cudaExtMemHandleDesc.size = sizeof(Vertex) * vertexBufSize;
checkCudaErrors(cudaImportExternalMemory(&cudaExtMemVertexBuffer,
&cudaExtMemHandleDesc));
cudaExternalMemoryBufferDesc cudaExtBufferDesc;
cudaExtBufferDesc.offset = 0;
cudaExtBufferDesc.size = sizeof(Vertex) * vertexBufSize;
cudaExtBufferDesc.flags = 0;
checkCudaErrors(cudaExternalMemoryGetMappedBuffer(
&cudaDevVertptr, cudaExtMemVertexBuffer, &cudaExtBufferDesc));
printf("CUDA Imported Vulkan vertex buffer\n");
}
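  // Imports both exportable Vulkan semaphores into CUDA so the stream can wait
  // on "Vulkan finished rendering" and signal "CUDA finished updating".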
void cudaVkImportSemaphore() {
cudaExternalSemaphoreHandleDesc externalSemaphoreHandleDesc;
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
cudaUpdateVkVertexBufSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd =
getVkSemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
cudaUpdateVkVertexBufSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(
&cudaExtCudaUpdateVkVertexBufSemaphore, &externalSemaphoreHandleDesc));
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
vkUpdateCudaVertexBufSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd =
getVkSemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
vkUpdateCudaVertexBufSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(
&cudaExtVkUpdateCudaVertexBufSemaphore, &externalSemaphoreHandleDesc));
printf("CUDA Imported Vulkan semaphore\n");
}
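  // The helpers below retrieve the platform-specific shareable handle for the
  // exported memory and semaphores: a Win32 HANDLE on Windows, a POSIX fd
  // elsewhere.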
#ifdef _WIN64 // For windows
HANDLE getVkMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
HANDLE handle;
VkMemoryGetWin32HandleInfoKHR vkMemoryGetWin32HandleInfoKHR = {};
vkMemoryGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
vkMemoryGetWin32HandleInfoKHR.pNext = NULL;
vkMemoryGetWin32HandleInfoKHR.memory = vertexBufferMemory;
vkMemoryGetWin32HandleInfoKHR.handleType =
(VkExternalMemoryHandleTypeFlagBitsKHR)externalMemoryHandleType;
fpGetMemoryWin32HandleKHR(device, &vkMemoryGetWin32HandleInfoKHR, &handle);
return handle;
}
#else
int getVkMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
if (externalMemoryHandleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT) {
int fd;
VkMemoryGetFdInfoKHR vkMemoryGetFdInfoKHR = {};
vkMemoryGetFdInfoKHR.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
vkMemoryGetFdInfoKHR.pNext = NULL;
vkMemoryGetFdInfoKHR.memory = vertexBufferMemory;
vkMemoryGetFdInfoKHR.handleType =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetMemoryFdKHR(device, &vkMemoryGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
#endif
#ifdef _WIN64
HANDLE getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
HANDLE handle;
VkSemaphoreGetWin32HandleInfoKHR vulkanSemaphoreGetWin32HandleInfoKHR = {};
vulkanSemaphoreGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
vulkanSemaphoreGetWin32HandleInfoKHR.pNext = NULL;
vulkanSemaphoreGetWin32HandleInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetWin32HandleInfoKHR.handleType =
externalSemaphoreHandleType;
fpGetSemaphoreWin32HandleKHR(device, &vulkanSemaphoreGetWin32HandleInfoKHR,
&handle);
return handle;
}
#else
int getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
if (externalSemaphoreHandleType ==
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
int fd;
VkSemaphoreGetFdInfoKHR vulkanSemaphoreGetFdInfoKHR = {};
vulkanSemaphoreGetFdInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
vulkanSemaphoreGetFdInfoKHR.pNext = NULL;
vulkanSemaphoreGetFdInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetFdInfoKHR.handleType =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetSemaphoreFdKHR(device, &vulkanSemaphoreGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
#endif
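  // Enqueue an asynchronous signal / wait of an imported semaphore on
  // streamToRun. The fence value is left at 0 because these are binary
  // (non-timeline) semaphores.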
void cudaVkSemaphoreSignal(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreSignalParams extSemaphoreSignalParams;
memset(&extSemaphoreSignalParams, 0, sizeof(extSemaphoreSignalParams));
extSemaphoreSignalParams.params.fence.value = 0;
extSemaphoreSignalParams.flags = 0;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreSignalParams, 1, streamToRun));
}
void cudaVkSemaphoreWait(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreWaitParams extSemaphoreWaitParams;
memset(&extSemaphoreWaitParams, 0, sizeof(extSemaphoreWaitParams));
extSemaphoreWaitParams.params.fence.value = 0;
extSemaphoreWaitParams.flags = 0;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreWaitParams, 1, streamToRun));
}
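  // Per-frame CUDA work: wait until Vulkan has finished reading the vertex
  // buffer, regenerate the sine wave at the advanced animation time, then
  // signal the semaphore the next graphics submit waits on.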
void cudaUpdateVertexBuffer() {
cudaVkSemaphoreWait(cudaExtVkUpdateCudaVertexBufSemaphore);
dim3 block(16, 16, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
Vertex* pos = (Vertex*)cudaDevVertptr;
AnimTime += 0.01f;
hipLaunchKernelGGL(( sinewave_gen_kernel), dim3(grid), dim3(block), 0, streamToRun, pos, mesh_width,
mesh_height, AnimTime);
cudaVkSemaphoreSignal(cudaExtCudaUpdateVkVertexBufSemaphore);
}
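  // Tears everything down; the imported CUDA external objects are destroyed
  // alongside the Vulkan semaphores and memory they were created from.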
void cleanup() {
if (enableValidationLayers) {
DestroyDebugReportCallbackEXT(instance, callback, nullptr);
}
vkDestroySemaphore(device, renderFinishedSemaphore, nullptr);
vkDestroySemaphore(device, imageAvailableSemaphore, nullptr);
checkCudaErrors(
cudaDestroyExternalSemaphore(cudaExtCudaUpdateVkVertexBufSemaphore));
vkDestroySemaphore(device, cudaUpdateVkVertexBufSemaphore, nullptr);
checkCudaErrors(
cudaDestroyExternalSemaphore(cudaExtVkUpdateCudaVertexBufSemaphore));
vkDestroySemaphore(device, vkUpdateCudaVertexBufSemaphore, nullptr);
vkDestroyCommandPool(device, commandPool, nullptr);
for (auto framebuffer : swapChainFramebuffers) {
vkDestroyFramebuffer(device, framebuffer, nullptr);
}
for (auto imageView : swapChainImageViews) {
vkDestroyImageView(device, imageView, nullptr);
}
vkDestroyPipeline(device, graphicsPipeline, nullptr);
vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
vkDestroyBuffer(device, uniformBuffer, nullptr);
vkFreeMemory(device, uniformBufferMemory, nullptr);
vkDestroyRenderPass(device, renderPass, nullptr);
vkDestroySwapchainKHR(device, swapChain, nullptr);
checkCudaErrors(cudaDestroyExternalMemory(cudaExtMemVertexBuffer));
vkDestroyBuffer(device, vertexBuffer, nullptr);
vkFreeMemory(device, vertexBufferMemory, nullptr);
vkDestroyDescriptorPool(device, descriptorPool, nullptr);
vkDestroyDevice(device, nullptr);
vkDestroySurfaceKHR(instance, surface, nullptr);
vkDestroyInstance(instance, nullptr);
glfwDestroyWindow(window);
glfwTerminate();
}
};
int main(int argc, char* argv[]) {
execution_path = argv[0];
vulkanCudaApp app;
try {
app.run();
} catch (const std::runtime_error& e) {
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
558f50539bc17ee9f94d3cba2764ce2d6c10fbe5.cu
|
/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
#include <vulkan/vulkan.h>
#include <algorithm>
#include <array>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <iostream>
#include <set>
#include <stdexcept>
#include <thread>
#include <vector>
#ifdef _WIN64
#include <aclapi.h>
#include <dxgi1_2.h>
#include <vulkan/vulkan_win32.h>
#include <windows.h>
#include <VersionHelpers.h>
#define _USE_MATH_DEFINES
#endif
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "linmath.h"
#define WIDTH 800
#define HEIGHT 600
#define VULKAN_VALIDATION 0
const std::vector<const char*> validationLayers = {
"VK_LAYER_LUNARG_standard_validation"};
#if VULKAN_VALIDATION
const bool enableValidationLayers = true;
#else
const bool enableValidationLayers = false;
#endif
struct QueueFamilyIndices {
int graphicsFamily = -1;
int presentFamily = -1;
bool isComplete() { return graphicsFamily >= 0 && presentFamily >= 0; }
};
const std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
#ifdef _WIN64
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
#else
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
#endif
};
#ifdef _WIN64
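// Builds a SECURITY_ATTRIBUTES structure with a DACL granting broad access so
// the exported memory and semaphore handles can be opened by the importing API.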
class WindowsSecurityAttributes {
protected:
SECURITY_ATTRIBUTES m_winSecurityAttributes;
PSECURITY_DESCRIPTOR m_winPSecurityDescriptor;
public:
WindowsSecurityAttributes();
SECURITY_ATTRIBUTES* operator&();
~WindowsSecurityAttributes();
};
WindowsSecurityAttributes::WindowsSecurityAttributes() {
m_winPSecurityDescriptor = (PSECURITY_DESCRIPTOR)calloc(
1, SECURITY_DESCRIPTOR_MIN_LENGTH + 2 * sizeof(void**));
// CHECK_NEQ(m_winPSecurityDescriptor, (PSECURITY_DESCRIPTOR)NULL);
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
InitializeSecurityDescriptor(m_winPSecurityDescriptor,
SECURITY_DESCRIPTOR_REVISION);
SID_IDENTIFIER_AUTHORITY sidIdentifierAuthority =
SECURITY_WORLD_SID_AUTHORITY;
AllocateAndInitializeSid(&sidIdentifierAuthority, 1, SECURITY_WORLD_RID, 0, 0,
0, 0, 0, 0, 0, ppSID);
EXPLICIT_ACCESS explicitAccess;
ZeroMemory(&explicitAccess, sizeof(EXPLICIT_ACCESS));
explicitAccess.grfAccessPermissions =
STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL;
explicitAccess.grfAccessMode = SET_ACCESS;
explicitAccess.grfInheritance = INHERIT_ONLY;
explicitAccess.Trustee.TrusteeForm = TRUSTEE_IS_SID;
explicitAccess.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
explicitAccess.Trustee.ptstrName = (LPTSTR)*ppSID;
SetEntriesInAcl(1, &explicitAccess, NULL, ppACL);
SetSecurityDescriptorDacl(m_winPSecurityDescriptor, TRUE, *ppACL, FALSE);
m_winSecurityAttributes.nLength = sizeof(m_winSecurityAttributes);
m_winSecurityAttributes.lpSecurityDescriptor = m_winPSecurityDescriptor;
m_winSecurityAttributes.bInheritHandle = TRUE;
}
SECURITY_ATTRIBUTES* WindowsSecurityAttributes::operator&() {
return &m_winSecurityAttributes;
}
WindowsSecurityAttributes::~WindowsSecurityAttributes() {
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
if (*ppSID) {
FreeSid(*ppSID);
}
if (*ppACL) {
LocalFree(*ppACL);
}
free(m_winPSecurityDescriptor);
}
#endif
struct UniformBufferObject {
mat4x4 model;
mat4x4 view;
mat4x4 proj;
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
struct Vertex {
vec4 pos;
vec3 color;
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
return bindingDescription;
}
static std::array<VkVertexInputAttributeDescription, 2>
getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 2> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
return attributeDescriptions;
}
};
size_t mesh_width = 0, mesh_height = 0;
std::string execution_path;
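// One CUDA thread per mesh vertex: maps (x, y) to u,v in [-1, 1], evaluates a
// time-varying sine/cosine height, and writes a vec4 position plus a constant
// red color. The bounds check guards partially-filled blocks.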
__global__ void sinewave_gen_kernel(Vertex* vertices, unsigned int width,
unsigned int height, float time) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f;
if (y < height && x < width) {
// write output vertex
vertices[y * width + x].pos[0] = u;
vertices[y * width + x].pos[1] = w;
vertices[y * width + x].pos[2] = v;
vertices[y * width + x].pos[3] = 1.0f;
vertices[y * width + x].color[0] = 1.0f;
vertices[y * width + x].color[1] = 0.0f;
vertices[y * width + x].color[2] = 0.0f;
}
}
class vulkanCudaApp {
public:
void run() {
initWindow();
initVulkan();
initCuda();
mainLoop();
cleanup();
}
private:
GLFWwindow* window;
VkInstance instance;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
uint8_t vkDeviceUUID[VK_UUID_SIZE];
VkDevice device;
VkQueue graphicsQueue;
VkQueue presentQueue;
VkSurfaceKHR surface;
VkSwapchainKHR swapChain;
std::vector<VkImage> swapChainImages;
VkFormat swapChainImageFormat;
VkExtent2D swapChainExtent;
std::vector<VkImageView> swapChainImageViews;
VkDescriptorSetLayout descriptorSetLayout;
VkDescriptorPool descriptorPool;
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkRenderPass renderPass;
VkPipeline graphicsPipeline;
std::vector<VkFramebuffer> swapChainFramebuffers;
VkCommandPool commandPool;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer uniformBuffer;
VkDeviceMemory uniformBufferMemory;
std::vector<VkCommandBuffer> commandBuffers;
VkSemaphore imageAvailableSemaphore;
VkSemaphore renderFinishedSemaphore;
VkSemaphore cudaUpdateVkVertexBufSemaphore;
VkSemaphore vkUpdateCudaVertexBufSemaphore;
size_t vertexBufSize = 0;
bool startSubmit = 0;
double AnimTime = 1.0f;
VkDebugReportCallbackEXT callback;
#ifdef _WIN64
PFN_vkGetMemoryWin32HandleKHR fpGetMemoryWin32HandleKHR;
PFN_vkGetSemaphoreWin32HandleKHR fpGetSemaphoreWin32HandleKHR;
#else
PFN_vkGetMemoryFdKHR fpGetMemoryFdKHR;
PFN_vkGetSemaphoreFdKHR fpGetSemaphoreFdKHR;
#endif
PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2;
// CUDA stuff
cudaExternalMemory_t cudaExtMemVertexBuffer;
cudaExternalSemaphore_t cudaExtCudaUpdateVkVertexBufSemaphore;
cudaExternalSemaphore_t cudaExtVkUpdateCudaVertexBufSemaphore;
void* cudaDevVertptr = NULL;
cudaStream_t streamToRun;
bool checkValidationLayerSupport() {
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
std::vector<VkLayerProperties> availableLayers(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
for (const char* layerName : validationLayers) {
bool layerFound = false;
for (const auto& layerProperties : availableLayers) {
if (strcmp(layerName, layerProperties.layerName) == 0) {
layerFound = true;
break;
}
}
if (!layerFound) {
return false;
}
}
return true;
}
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugCallback(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType,
uint64_t obj, size_t location, int32_t code,
const char* layerPrefix, const char* msg, void* userData) {
std::cerr << "validation layer: " << msg << std::endl;
return VK_FALSE;
}
VkResult CreateDebugReportCallbackEXT(
VkInstance instance,
const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugReportCallbackEXT* pCallback) {
auto func = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(
instance, "vkCreateDebugReportCallbackEXT");
if (func != nullptr) {
return func(instance, pCreateInfo, pAllocator, pCallback);
} else {
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
}
void DestroyDebugReportCallbackEXT(VkInstance instance,
VkDebugReportCallbackEXT callback,
const VkAllocationCallbacks* pAllocator) {
auto func = (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(
instance, "vkDestroyDebugReportCallbackEXT");
if (func != nullptr) {
func(instance, callback, pAllocator);
}
}
void setupDebugCallback() {
if (!enableValidationLayers) return;
VkDebugReportCallbackCreateInfoEXT createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
createInfo.flags =
VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
createInfo.pfnCallback = debugCallback;
if (CreateDebugReportCallbackEXT(instance, &createInfo, nullptr,
&callback) != VK_SUCCESS) {
throw std::runtime_error("failed to set up debug callback!");
}
}
void initWindow() {
glfwInit();
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE);
window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan-CUDA Interop Sinewave",
nullptr, nullptr);
}
void createInstance() {
if (enableValidationLayers && !checkValidationLayerSupport()) {
throw std::runtime_error(
"validation layers requested, but not available!");
}
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "Vulkan CUDA Sinewave";
appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.pEngineName = "No Engine";
appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.apiVersion = VK_API_VERSION_1_0;
VkInstanceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
createInfo.pApplicationInfo = &appInfo;
uint32_t glfwExtensionCount = 0;
const char** glfwExtensions;
glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
std::vector<const char*> enabledExtensionNameList;
enabledExtensionNameList.push_back(
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
enabledExtensionNameList.push_back(
VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME);
enabledExtensionNameList.push_back(
VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
    for (uint32_t i = 0; i < glfwExtensionCount; i++) {
enabledExtensionNameList.push_back(glfwExtensions[i]);
}
if (enableValidationLayers) {
enabledExtensionNameList.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
} else {
createInfo.enabledLayerCount = 0;
}
createInfo.enabledExtensionCount = enabledExtensionNameList.size();
createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
throw std::runtime_error("failed to create instance!");
} else {
std::cout << "Instance created successfully!!\n";
}
fpGetPhysicalDeviceProperties2 =
(PFN_vkGetPhysicalDeviceProperties2)vkGetInstanceProcAddr(
instance, "vkGetPhysicalDeviceProperties2");
if (fpGetPhysicalDeviceProperties2 == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetPhysicalDeviceProperties2KHR\" not "
"found.\n");
}
#ifdef _WIN64
fpGetMemoryWin32HandleKHR =
(PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryWin32HandleKHR");
if (fpGetMemoryWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryFdKHR");
if (fpGetMemoryFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryFdKHR\" not found.\n");
}
#endif
}
void initVulkan() {
createInstance();
setupDebugCallback();
createSurface();
pickPhysicalDevice();
createLogicalDevice();
getKhrExtensionsFn();
createSwapChain();
createImageViews();
createRenderPass();
createDescriptorSetLayout();
createGraphicsPipeline();
createFramebuffers();
createCommandPool();
createVertexBuffer();
createUniformBuffer();
createDescriptorPool();
createDescriptorSet();
createCommandBuffers();
createSyncObjects();
createSyncObjectsExt();
}
void initCuda() {
setCudaVkDevice();
cudaVkImportVertexMem();
cudaInitVertexMem();
cudaVkImportSemaphore();
}
void createSurface() {
if (glfwCreateWindowSurface(instance, window, nullptr, &surface) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create window surface!");
}
}
void pickPhysicalDevice() {
uint32_t deviceCount = 0;
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
if (deviceCount == 0) {
throw std::runtime_error("failed to find GPUs with Vulkan support!");
}
std::vector<VkPhysicalDevice> devices(deviceCount);
vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
for (const auto& device : devices) {
if (isDeviceSuitable(device)) {
physicalDevice = device;
break;
}
}
if (physicalDevice == VK_NULL_HANDLE) {
throw std::runtime_error("failed to find a suitable GPU!");
}
std::cout << "Selected physical device = " << physicalDevice << std::endl;
VkPhysicalDeviceIDProperties vkPhysicalDeviceIDProperties = {};
vkPhysicalDeviceIDProperties.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
vkPhysicalDeviceIDProperties.pNext = NULL;
VkPhysicalDeviceProperties2 vkPhysicalDeviceProperties2 = {};
vkPhysicalDeviceProperties2.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
vkPhysicalDeviceProperties2.pNext = &vkPhysicalDeviceIDProperties;
fpGetPhysicalDeviceProperties2(physicalDevice,
&vkPhysicalDeviceProperties2);
memcpy(vkDeviceUUID, vkPhysicalDeviceIDProperties.deviceUUID,
sizeof(vkDeviceUUID));
}
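  // Selects the CUDA device whose UUID matches the Vulkan physical device
  // chosen above, so both APIs talk to the same GPU. Returns the matching CUDA
  // device ordinal (or -1 if none matches); exits if every device is
  // compute-prohibited.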
int setCudaVkDevice() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&device_count));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the GPU which is selected by Vulkan
while (current_device < device_count) {
cudaGetDeviceProperties(&deviceProp, current_device);
if ((deviceProp.computeMode != cudaComputeModeProhibited)) {
// Compare the cuda device UUID with vulkan UUID
int ret = memcmp(&deviceProp.uuid, &vkDeviceUUID, VK_UUID_SIZE);
if (ret == 0) {
checkCudaErrors(cudaSetDevice(current_device));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, deviceProp.name, deviceProp.major,
deviceProp.minor);
return current_device;
}
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No Vulkan-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
bool isDeviceSuitable(VkPhysicalDevice device) {
QueueFamilyIndices indices = findQueueFamilies(device);
bool extensionsSupported = checkDeviceExtensionSupport(device);
bool swapChainAdequate = false;
if (extensionsSupported) {
SwapChainSupportDetails swapChainSupport = querySwapChainSupport(device);
swapChainAdequate = !swapChainSupport.formats.empty() &&
!swapChainSupport.presentModes.empty();
}
return indices.isComplete() && extensionsSupported && swapChainAdequate;
}
bool checkDeviceExtensionSupport(VkPhysicalDevice device) {
uint32_t extensionCount;
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
nullptr);
std::vector<VkExtensionProperties> availableExtensions(extensionCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
availableExtensions.data());
std::set<std::string> requiredExtensions(deviceExtensions.begin(),
deviceExtensions.end());
for (const auto& extension : availableExtensions) {
requiredExtensions.erase(extension.extensionName);
}
return requiredExtensions.empty();
}
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device) {
QueueFamilyIndices indices;
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
queueFamilies.data());
int i = 0;
for (const auto& queueFamily : queueFamilies) {
if (queueFamily.queueCount > 0 &&
queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
indices.graphicsFamily = i;
}
VkBool32 presentSupport = false;
vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (queueFamily.queueCount > 0 && presentSupport) {
indices.presentFamily = i;
}
if (indices.isComplete()) {
break;
}
i++;
}
return indices;
}
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device) {
SwapChainSupportDetails details;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface,
&details.capabilities);
uint32_t formatCount;
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
nullptr);
if (formatCount != 0) {
details.formats.resize(formatCount);
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
details.formats.data());
}
uint32_t presentModeCount;
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface,
&presentModeCount, nullptr);
if (presentModeCount != 0) {
details.presentModes.resize(presentModeCount);
vkGetPhysicalDeviceSurfacePresentModesKHR(
device, surface, &presentModeCount, details.presentModes.data());
}
return details;
}
VkSurfaceFormatKHR chooseSwapSurfaceFormat(
const std::vector<VkSurfaceFormatKHR>& availableFormats) {
if (availableFormats.size() == 1 &&
availableFormats[0].format == VK_FORMAT_UNDEFINED) {
return {VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
}
for (const auto& availableFormat : availableFormats) {
if (availableFormat.format == VK_FORMAT_B8G8R8A8_UNORM &&
availableFormat.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
return availableFormat;
}
}
return availableFormats[0];
}
VkPresentModeKHR chooseSwapPresentMode(
      const std::vector<VkPresentModeKHR>& availablePresentModes) {
VkPresentModeKHR bestMode = VK_PRESENT_MODE_FIFO_KHR;
for (const auto& availablePresentMode : availablePresentModes) {
if (availablePresentMode == VK_PRESENT_MODE_MAILBOX_KHR) {
return availablePresentMode;
} else if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
bestMode = availablePresentMode;
}
}
return bestMode;
}
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) {
if (capabilities.currentExtent.width !=
std::numeric_limits<uint32_t>::max()) {
return capabilities.currentExtent;
} else {
VkExtent2D actualExtent = {WIDTH, HEIGHT};
actualExtent.width = std::max(
capabilities.minImageExtent.width,
std::min(capabilities.maxImageExtent.width, actualExtent.width));
actualExtent.height = std::max(
capabilities.minImageExtent.height,
std::min(capabilities.maxImageExtent.height, actualExtent.height));
return actualExtent;
}
}
void createLogicalDevice() {
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
std::set<int> uniqueQueueFamilies = {indices.graphicsFamily,
indices.presentFamily};
float queuePriority = 1.0f;
for (int queueFamily : uniqueQueueFamilies) {
VkDeviceQueueCreateInfo queueCreateInfo = {};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = queueFamily;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &queuePriority;
queueCreateInfos.push_back(queueCreateInfo);
}
VkPhysicalDeviceFeatures deviceFeatures = {};
VkDeviceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
createInfo.pQueueCreateInfos = queueCreateInfos.data();
createInfo.queueCreateInfoCount = queueCreateInfos.size();
createInfo.pEnabledFeatures = &deviceFeatures;
std::vector<const char*> enabledExtensionNameList;
    for (size_t i = 0; i < deviceExtensions.size(); i++) {
enabledExtensionNameList.push_back(deviceExtensions[i]);
}
if (enableValidationLayers) {
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
} else {
createInfo.enabledLayerCount = 0;
}
createInfo.enabledExtensionCount =
static_cast<uint32_t>(enabledExtensionNameList.size());
createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create logical device!");
}
vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
}
void createSwapChain() {
SwapChainSupportDetails swapChainSupport =
querySwapChainSupport(physicalDevice);
VkSurfaceFormatKHR surfaceFormat =
chooseSwapSurfaceFormat(swapChainSupport.formats);
VkPresentModeKHR presentMode =
chooseSwapPresentMode(swapChainSupport.presentModes);
VkExtent2D extent = chooseSwapExtent(swapChainSupport.capabilities);
uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1;
if (swapChainSupport.capabilities.maxImageCount > 0 &&
imageCount > swapChainSupport.capabilities.maxImageCount) {
imageCount = swapChainSupport.capabilities.maxImageCount;
}
VkSwapchainCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
createInfo.surface = surface;
createInfo.minImageCount = imageCount;
createInfo.imageFormat = surfaceFormat.format;
createInfo.imageColorSpace = surfaceFormat.colorSpace;
createInfo.imageExtent = extent;
createInfo.imageArrayLayers = 1;
createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
uint32_t queueFamilyIndices[] = {(uint32_t)indices.graphicsFamily,
(uint32_t)indices.presentFamily};
if (indices.graphicsFamily != indices.presentFamily) {
createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
createInfo.queueFamilyIndexCount = 2;
createInfo.pQueueFamilyIndices = queueFamilyIndices;
} else {
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
createInfo.queueFamilyIndexCount = 0; // Optional
createInfo.pQueueFamilyIndices = nullptr; // Optional
}
createInfo.preTransform = swapChainSupport.capabilities.currentTransform;
createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
createInfo.presentMode = presentMode;
createInfo.clipped = VK_TRUE;
createInfo.oldSwapchain = VK_NULL_HANDLE;
if (vkCreateSwapchainKHR(device, &createInfo, nullptr, &swapChain) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create swap chain!");
} else {
std::cout << "Swapchain created!!\n";
}
vkGetSwapchainImagesKHR(device, swapChain, &imageCount, nullptr);
swapChainImages.resize(imageCount);
vkGetSwapchainImagesKHR(device, swapChain, &imageCount,
swapChainImages.data());
swapChainImageFormat = surfaceFormat.format;
swapChainExtent = extent;
}
void createImageViews() {
swapChainImageViews.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
VkImageViewCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
createInfo.image = swapChainImages[i];
createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
createInfo.format = swapChainImageFormat;
createInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
createInfo.subresourceRange.baseMipLevel = 0;
createInfo.subresourceRange.levelCount = 1;
createInfo.subresourceRange.baseArrayLayer = 0;
createInfo.subresourceRange.layerCount = 1;
if (vkCreateImageView(device, &createInfo, nullptr,
&swapChainImageViews[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create image views!");
}
}
}
void createDescriptorSetLayout() {
VkDescriptorSetLayoutBinding uboLayoutBinding = {};
uboLayoutBinding.binding = 0;
uboLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
uboLayoutBinding.descriptorCount = 1;
uboLayoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
uboLayoutBinding.pImmutableSamplers = nullptr; // Optional
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = 1;
layoutInfo.pBindings = &uboLayoutBinding;
if (vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr,
&descriptorSetLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor set layout!");
}
}
void createGraphicsPipeline() {
auto vertShaderCode = readFile("shader_sine.vert");
auto fragShaderCode = readFile("shader_sine.frag");
VkShaderModule vertShaderModule;
VkShaderModule fragShaderModule;
vertShaderModule = createShaderModule(vertShaderCode);
fragShaderModule = createShaderModule(fragShaderCode);
VkPipelineShaderStageCreateInfo vertShaderStageInfo = {};
vertShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertShaderStageInfo.module = vertShaderModule;
vertShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo fragShaderStageInfo = {};
fragShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragShaderStageInfo.module = fragShaderModule;
fragShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo,
fragShaderStageInfo};
VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
vertexInputInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
auto bindingDescription = Vertex::getBindingDescription();
auto attributeDescriptions = Vertex::getAttributeDescriptions();
vertexInputInfo.vertexBindingDescriptionCount = 1;
vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
vertexInputInfo.vertexAttributeDescriptionCount =
static_cast<uint32_t>(attributeDescriptions.size());
vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
VkPipelineInputAssemblyStateCreateInfo inputAssembly = {};
inputAssembly.sType =
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
inputAssembly.primitiveRestartEnable = VK_FALSE;
VkViewport viewport = {};
viewport.x = 0.0f;
viewport.y = 0.0f;
viewport.width = (float)swapChainExtent.width;
viewport.height = (float)swapChainExtent.height;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
VkRect2D scissor = {};
scissor.offset = {0, 0};
scissor.extent = swapChainExtent;
VkPipelineViewportStateCreateInfo viewportState = {};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.pViewports = &viewport;
viewportState.scissorCount = 1;
viewportState.pScissors = &scissor;
VkPipelineRasterizationStateCreateInfo rasterizer = {};
rasterizer.sType =
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterizer.depthBiasEnable = VK_FALSE;
rasterizer.depthBiasConstantFactor = 0.0f; // Optional
rasterizer.depthBiasClamp = 0.0f; // Optional
rasterizer.depthBiasSlopeFactor = 0.0f; // Optional
VkPipelineMultisampleStateCreateInfo multisampling = {};
multisampling.sType =
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisampling.minSampleShading = 1.0f; // Optional
multisampling.pSampleMask = nullptr; // Optional
multisampling.alphaToCoverageEnable = VK_FALSE; // Optional
multisampling.alphaToOneEnable = VK_FALSE; // Optional
VkPipelineColorBlendAttachmentState colorBlendAttachment = {};
colorBlendAttachment.colorWriteMask =
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
colorBlendAttachment.blendEnable = VK_FALSE;
colorBlendAttachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE; // Optional
colorBlendAttachment.dstColorBlendFactor =
VK_BLEND_FACTOR_ZERO; // Optional
colorBlendAttachment.colorBlendOp = VK_BLEND_OP_ADD; // Optional
colorBlendAttachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; // Optional
colorBlendAttachment.dstAlphaBlendFactor =
VK_BLEND_FACTOR_ZERO; // Optional
colorBlendAttachment.alphaBlendOp = VK_BLEND_OP_ADD; // Optional
VkPipelineColorBlendStateCreateInfo colorBlending = {};
colorBlending.sType =
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY; // Optional
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &colorBlendAttachment;
colorBlending.blendConstants[0] = 0.0f; // Optional
colorBlending.blendConstants[1] = 0.0f; // Optional
colorBlending.blendConstants[2] = 0.0f; // Optional
colorBlending.blendConstants[3] = 0.0f; // Optional
#if 0
VkDynamicState dynamicStates[] = {
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_LINE_WIDTH
};
VkPipelineDynamicStateCreateInfo dynamicState = {};
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.dynamicStateCount = 2;
dynamicState.pDynamicStates = dynamicStates;
#endif
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = 1; // Optional
pipelineLayoutInfo.pSetLayouts = &descriptorSetLayout; // Optional
pipelineLayoutInfo.pushConstantRangeCount = 0; // Optional
pipelineLayoutInfo.pPushConstantRanges = nullptr; // Optional
if (vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr,
&pipelineLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create pipeline layout!");
}
VkGraphicsPipelineCreateInfo pipelineInfo = {};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = 2;
pipelineInfo.pStages = shaderStages;
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pDepthStencilState = nullptr; // Optional
pipelineInfo.pColorBlendState = &colorBlending;
pipelineInfo.pDynamicState = nullptr; // Optional
pipelineInfo.layout = pipelineLayout;
pipelineInfo.renderPass = renderPass;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = VK_NULL_HANDLE; // Optional
pipelineInfo.basePipelineIndex = -1; // Optional
if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipelineInfo,
nullptr, &graphicsPipeline) != VK_SUCCESS) {
throw std::runtime_error("failed to create graphics pipeline!");
} else {
std::cout << "Pipeline created successfully!!\n";
}
vkDestroyShaderModule(device, fragShaderModule, nullptr);
vkDestroyShaderModule(device, vertShaderModule, nullptr);
}
void createRenderPass() {
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = swapChainImageFormat;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference colorAttachmentRef = {};
colorAttachmentRef.attachment = 0;
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentRef;
VkRenderPassCreateInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &colorAttachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
VkSubpassDependency dependency = {};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstSubpass = 0;
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.srcAccessMask = 0;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &dependency;
if (vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create render pass!");
}
}
void createFramebuffers() {
swapChainFramebuffers.resize(swapChainImageViews.size());
for (size_t i = 0; i < swapChainImageViews.size(); i++) {
VkImageView attachments[] = {swapChainImageViews[i]};
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.renderPass = renderPass;
framebufferInfo.attachmentCount = 1;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = swapChainExtent.width;
framebufferInfo.height = swapChainExtent.height;
framebufferInfo.layers = 1;
if (vkCreateFramebuffer(device, &framebufferInfo, nullptr,
&swapChainFramebuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create framebuffer!");
}
}
}
void createCommandPool() {
QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
poolInfo.flags = 0; // Optional
if (vkCreateCommandPool(device, &poolInfo, nullptr, &commandPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create command pool!");
}
}
void createBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties, VkBuffer& buffer,
VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
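  // Same as createBuffer, but chains VkExportMemoryAllocateInfoKHR (plus Win32
  // security attributes on Windows) into the allocation so the memory can be
  // exported with the requested external handle type and imported by CUDA.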
void createBufferExtMem(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties,
VkExternalMemoryHandleTypeFlagsKHR extMemHandleType,
VkBuffer& buffer, VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportMemoryWin32HandleInfoKHR vulkanExportMemoryWin32HandleInfoKHR = {};
vulkanExportMemoryWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
vulkanExportMemoryWin32HandleInfoKHR.pNext = NULL;
vulkanExportMemoryWin32HandleInfoKHR.pAttributes = &winSecurityAttributes;
vulkanExportMemoryWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportMemoryWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportMemoryAllocateInfoKHR vulkanExportMemoryAllocateInfoKHR = {};
vulkanExportMemoryAllocateInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
#ifdef _WIN64
vulkanExportMemoryAllocateInfoKHR.pNext =
extMemHandleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR
? &vulkanExportMemoryWin32HandleInfoKHR
: NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes = extMemHandleType;
#else
vulkanExportMemoryAllocateInfoKHR.pNext = NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.pNext = &vulkanExportMemoryAllocateInfoKHR;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate external buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
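  // Allocates the device-local, exportable vertex buffer. The mesh is sized to
  // half the swapchain extent in each dimension, and the external handle type
  // is picked per platform (opaque Win32, Win32 KMT on pre-Windows-8, or fd).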
void createVertexBuffer() {
mesh_width = swapChainExtent.width / 2;
mesh_height = swapChainExtent.height / 2;
vertexBufSize = mesh_height * mesh_width;
VkDeviceSize bufferSize = sizeof(Vertex) * vertexBufSize;
#ifdef _WIN64
if (IsWindows8OrGreater()) {
createBufferExtMem(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
vertexBuffer, vertexBufferMemory);
} else {
createBufferExtMem(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
vertexBuffer, vertexBufferMemory);
}
#else
createBufferExtMem(bufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
vertexBuffer, vertexBufferMemory);
#endif
}
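  // One-time CUDA initialization of the shared vertex buffer: create the stream
  // used for all CUDA work and run the sine-wave generator kernel once before
  // the first Vulkan draw.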
void cudaInitVertexMem() {
checkCudaErrors(cudaStreamCreate(&streamToRun));
dim3 block(16, 16, 1);
dim3 grid(mesh_width / 16, mesh_height / 16, 1);
Vertex* vertices = (Vertex*)cudaDevVertptr;
sinewave_gen_kernel<<<grid, block, 0, streamToRun>>>(vertices, mesh_width,
mesh_height, 1.0);
checkCudaErrors(cudaStreamSynchronize(streamToRun));
}
void createUniformBuffer() {
VkDeviceSize bufferSize = sizeof(UniformBufferObject);
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
uniformBuffer, uniformBufferMemory);
}
uint32_t findMemoryType(uint32_t typeFilter,
VkMemoryPropertyFlags properties) {
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
if (typeFilter & (1 << i) && (memProperties.memoryTypes[i].propertyFlags &
properties) == properties) {
return i;
}
}
throw std::runtime_error("failed to find suitable memory type!");
}
void getKhrExtensionsFn() {
#ifdef _WIN64
fpGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreWin32HandleKHR");
if (fpGetSemaphoreWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreFdKHR");
if (fpGetSemaphoreFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreFdKHR\" not found.\n");
}
#endif
}
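  // Record one primary command buffer per swapchain framebuffer: begin the
  // render pass with a black clear color, bind the graphics pipeline, the
  // CUDA-shared vertex buffer and the uniform-buffer descriptor set, then draw
  // vertexBufSize vertices.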
void createCommandBuffers() {
commandBuffers.resize(swapChainFramebuffers.size());
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)commandBuffers.size();
if (vkAllocateCommandBuffers(device, &allocInfo, commandBuffers.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate command buffers!");
}
for (size_t i = 0; i < commandBuffers.size(); i++) {
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
beginInfo.pInheritanceInfo = nullptr; // Optional
if (vkBeginCommandBuffer(commandBuffers[i], &beginInfo) != VK_SUCCESS) {
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = swapChainFramebuffers[i];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = swapChainExtent;
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffers[i], &renderPassInfo,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS,
graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindDescriptorSets(commandBuffers[i],
VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
0, 1, &descriptorSet, 0, nullptr);
vkCmdDraw(commandBuffers[i], static_cast<uint32_t>(vertexBufSize), 1, 0,
0);
vkCmdEndRenderPass(commandBuffers[i]);
if (vkEndCommandBuffer(commandBuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to record command buffer!");
}
}
}
VkShaderModule createShaderModule(const std::vector<char>& code) {
VkShaderModuleCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.codeSize = code.size();
createInfo.pCode = reinterpret_cast<const uint32_t*>(code.data());
VkShaderModule shaderModule;
if (vkCreateShaderModule(device, &createInfo, nullptr, &shaderModule) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create shader module!");
}
return shaderModule;
}
static std::vector<char> readFile(const std::string& filename) {
char* file_path = sdkFindFilePath(filename.c_str(), execution_path.c_str());
std::ifstream file(file_path, std::ios::ate | std::ios::binary);
if (!file.is_open()) {
throw std::runtime_error("failed to open shader spv file!\n");
}
size_t fileSize = (size_t)file.tellg();
std::vector<char> buffer(fileSize);
file.seekg(0);
file.read(buffer.data(), fileSize);
file.close();
return buffer;
}
void mainLoop() {
updateUniformBuffer();
while (!glfwWindowShouldClose(window)) {
glfwPollEvents();
drawFrame();
}
vkDeviceWaitIdle(device);
}
void updateUniformBuffer() {
UniformBufferObject ubo = {};
mat4x4_identity(ubo.model);
mat4x4 Model;
mat4x4_dup(Model, ubo.model);
mat4x4_rotate(ubo.model, Model, 1.0f, 0.0f, 1.0f, degreesToRadians(45.0f));
vec3 eye = {2.0f, 2.0f, 2.0f};
vec3 center = {0.0f, 0.0f, 0.0f};
vec3 up = {0.0f, 0.0f, 1.0f};
mat4x4_look_at(ubo.view, eye, center, up);
mat4x4_perspective(ubo.proj, degreesToRadians(45.0f),
swapChainExtent.width / (float)swapChainExtent.height,
0.1f, 10.0f);
ubo.proj[1][1] *= -1;
void* data;
vkMapMemory(device, uniformBufferMemory, 0, sizeof(ubo), 0, &data);
memcpy(data, &ubo, sizeof(ubo));
vkUnmapMemory(device, uniformBufferMemory);
}
void createDescriptorPool() {
VkDescriptorPoolSize poolSize = {};
poolSize.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSize.descriptorCount = 1;
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = 1;
poolInfo.pPoolSizes = &poolSize;
poolInfo.maxSets = 1;
if (vkCreateDescriptorPool(device, &poolInfo, nullptr, &descriptorPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor pool!");
}
}
void createDescriptorSet() {
VkDescriptorSetLayout layouts[] = {descriptorSetLayout};
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = layouts;
if (vkAllocateDescriptorSets(device, &allocInfo, &descriptorSet) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor set!");
}
VkDescriptorBufferInfo bufferInfo = {};
bufferInfo.buffer = uniformBuffer;
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
VkWriteDescriptorSet descriptorWrite = {};
descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrite.dstSet = descriptorSet;
descriptorWrite.dstBinding = 0;
descriptorWrite.dstArrayElement = 0;
descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrite.descriptorCount = 1;
descriptorWrite.pBufferInfo = &bufferInfo;
descriptorWrite.pImageInfo = nullptr; // Optional
descriptorWrite.pTexelBufferView = nullptr; // Optional
vkUpdateDescriptorSets(device, 1, &descriptorWrite, 0, nullptr);
}
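  // Per-frame flow: acquire a swapchain image, submit the pre-recorded command
  // buffer (the very first submit waits only on image acquisition, later ones
  // also wait on the CUDA-updated-vertex-buffer semaphore), present, then let
  // CUDA regenerate the vertex buffer for the next frame.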
void drawFrame() {
uint32_t imageIndex;
vkAcquireNextImageKHR(device, swapChain,
std::numeric_limits<uint64_t>::max(),
imageAvailableSemaphore, VK_NULL_HANDLE, &imageIndex);
if (!startSubmit) {
submitVulkan(imageIndex);
startSubmit = 1;
} else {
submitVulkanCuda(imageIndex);
}
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
VkSemaphore signalSemaphores[] = {renderFinishedSemaphore};
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
VkSwapchainKHR swapChains[] = {swapChain};
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &imageIndex;
presentInfo.pResults = nullptr; // Optional
vkQueuePresentKHR(presentQueue, &presentInfo);
cudaUpdateVertexBuffer();
    // Sleep for 5 milliseconds so the CPU does not submit work to the GPU
    // faster than it can be consumed
std::this_thread::sleep_for(std::chrono::microseconds(5000));
}
void submitVulkan(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphore,
vkUpdateCudaVertexBufSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
void submitVulkanCuda(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphore,
cudaUpdateVkVertexBufSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submitInfo.waitSemaphoreCount = 2;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphore,
vkUpdateCudaVertexBufSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
void createSyncObjects() {
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&imageAvailableSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&renderFinishedSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a frame!");
}
}
void createSyncObjectsExt() {
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
memset(&semaphoreInfo, 0, sizeof(semaphoreInfo));
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportSemaphoreWin32HandleInfoKHR
vulkanExportSemaphoreWin32HandleInfoKHR = {};
vulkanExportSemaphoreWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
vulkanExportSemaphoreWin32HandleInfoKHR.pNext = NULL;
vulkanExportSemaphoreWin32HandleInfoKHR.pAttributes =
&winSecurityAttributes;
vulkanExportSemaphoreWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportSemaphoreWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportSemaphoreCreateInfoKHR vulkanExportSemaphoreCreateInfo = {};
vulkanExportSemaphoreCreateInfo.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
#ifdef _WIN64
vulkanExportSemaphoreCreateInfo.pNext =
IsWindows8OrGreater() ? &vulkanExportSemaphoreWin32HandleInfoKHR : NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportSemaphoreCreateInfo.pNext = NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
semaphoreInfo.pNext = &vulkanExportSemaphoreCreateInfo;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&cudaUpdateVkVertexBufSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&vkUpdateCudaVertexBufSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a CUDA-Vulkan!");
}
}
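  // Import the exported Vulkan device memory that backs the vertex buffer into
  // CUDA and map it to a device pointer (cudaDevVertptr) usable by kernels.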
void cudaVkImportVertexMem() {
cudaExternalMemoryHandleDesc cudaExtMemHandleDesc;
memset(&cudaExtMemHandleDesc, 0, sizeof(cudaExtMemHandleDesc));
#ifdef _WIN64
cudaExtMemHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalMemoryHandleTypeOpaqueWin32
: cudaExternalMemoryHandleTypeOpaqueWin32Kmt;
cudaExtMemHandleDesc.handle.win32.handle = getVkMemHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT);
#else
cudaExtMemHandleDesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
cudaExtMemHandleDesc.handle.fd =
getVkMemHandle(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT);
#endif
cudaExtMemHandleDesc.size = sizeof(Vertex) * vertexBufSize;
checkCudaErrors(cudaImportExternalMemory(&cudaExtMemVertexBuffer,
&cudaExtMemHandleDesc));
cudaExternalMemoryBufferDesc cudaExtBufferDesc;
cudaExtBufferDesc.offset = 0;
cudaExtBufferDesc.size = sizeof(Vertex) * vertexBufSize;
cudaExtBufferDesc.flags = 0;
checkCudaErrors(cudaExternalMemoryGetMappedBuffer(
&cudaDevVertptr, cudaExtMemVertexBuffer, &cudaExtBufferDesc));
printf("CUDA Imported Vulkan vertex buffer\n");
}
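  // Import both exported Vulkan semaphores into CUDA so kernel launches can be
  // interleaved with Vulkan rendering without CPU-side synchronization.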
void cudaVkImportSemaphore() {
cudaExternalSemaphoreHandleDesc externalSemaphoreHandleDesc;
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
cudaUpdateVkVertexBufSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd =
getVkSemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
cudaUpdateVkVertexBufSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(
&cudaExtCudaUpdateVkVertexBufSemaphore, &externalSemaphoreHandleDesc));
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
vkUpdateCudaVertexBufSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd =
getVkSemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
vkUpdateCudaVertexBufSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(
&cudaExtVkUpdateCudaVertexBufSemaphore, &externalSemaphoreHandleDesc));
printf("CUDA Imported Vulkan semaphore\n");
}
#ifdef _WIN64 // For windows
HANDLE getVkMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
HANDLE handle;
VkMemoryGetWin32HandleInfoKHR vkMemoryGetWin32HandleInfoKHR = {};
vkMemoryGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
vkMemoryGetWin32HandleInfoKHR.pNext = NULL;
vkMemoryGetWin32HandleInfoKHR.memory = vertexBufferMemory;
vkMemoryGetWin32HandleInfoKHR.handleType =
(VkExternalMemoryHandleTypeFlagBitsKHR)externalMemoryHandleType;
fpGetMemoryWin32HandleKHR(device, &vkMemoryGetWin32HandleInfoKHR, &handle);
return handle;
}
#else
int getVkMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
if (externalMemoryHandleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT) {
int fd;
VkMemoryGetFdInfoKHR vkMemoryGetFdInfoKHR = {};
vkMemoryGetFdInfoKHR.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
vkMemoryGetFdInfoKHR.pNext = NULL;
vkMemoryGetFdInfoKHR.memory = vertexBufferMemory;
vkMemoryGetFdInfoKHR.handleType =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetMemoryFdKHR(device, &vkMemoryGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
#endif
#ifdef _WIN64
HANDLE getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
HANDLE handle;
VkSemaphoreGetWin32HandleInfoKHR vulkanSemaphoreGetWin32HandleInfoKHR = {};
vulkanSemaphoreGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
vulkanSemaphoreGetWin32HandleInfoKHR.pNext = NULL;
vulkanSemaphoreGetWin32HandleInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetWin32HandleInfoKHR.handleType =
externalSemaphoreHandleType;
fpGetSemaphoreWin32HandleKHR(device, &vulkanSemaphoreGetWin32HandleInfoKHR,
&handle);
return handle;
}
#else
int getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
if (externalSemaphoreHandleType ==
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
int fd;
VkSemaphoreGetFdInfoKHR vulkanSemaphoreGetFdInfoKHR = {};
vulkanSemaphoreGetFdInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
vulkanSemaphoreGetFdInfoKHR.pNext = NULL;
vulkanSemaphoreGetFdInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetFdInfoKHR.handleType =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetSemaphoreFdKHR(device, &vulkanSemaphoreGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
#endif
void cudaVkSemaphoreSignal(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreSignalParams extSemaphoreSignalParams;
memset(&extSemaphoreSignalParams, 0, sizeof(extSemaphoreSignalParams));
extSemaphoreSignalParams.params.fence.value = 0;
extSemaphoreSignalParams.flags = 0;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreSignalParams, 1, streamToRun));
}
void cudaVkSemaphoreWait(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreWaitParams extSemaphoreWaitParams;
memset(&extSemaphoreWaitParams, 0, sizeof(extSemaphoreWaitParams));
extSemaphoreWaitParams.params.fence.value = 0;
extSemaphoreWaitParams.flags = 0;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreWaitParams, 1, streamToRun));
}
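  // Per-frame CUDA work: wait for Vulkan to release the vertex buffer, rerun
  // the sine-wave kernel with an advancing time value, then signal Vulkan that
  // the buffer is ready for the next draw.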
void cudaUpdateVertexBuffer() {
cudaVkSemaphoreWait(cudaExtVkUpdateCudaVertexBufSemaphore);
dim3 block(16, 16, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
Vertex* pos = (Vertex*)cudaDevVertptr;
AnimTime += 0.01f;
sinewave_gen_kernel<<<grid, block, 0, streamToRun>>>(pos, mesh_width,
mesh_height, AnimTime);
cudaVkSemaphoreSignal(cudaExtCudaUpdateVkVertexBufSemaphore);
}
void cleanup() {
if (enableValidationLayers) {
DestroyDebugReportCallbackEXT(instance, callback, nullptr);
}
vkDestroySemaphore(device, renderFinishedSemaphore, nullptr);
vkDestroySemaphore(device, imageAvailableSemaphore, nullptr);
checkCudaErrors(
cudaDestroyExternalSemaphore(cudaExtCudaUpdateVkVertexBufSemaphore));
vkDestroySemaphore(device, cudaUpdateVkVertexBufSemaphore, nullptr);
checkCudaErrors(
cudaDestroyExternalSemaphore(cudaExtVkUpdateCudaVertexBufSemaphore));
vkDestroySemaphore(device, vkUpdateCudaVertexBufSemaphore, nullptr);
vkDestroyCommandPool(device, commandPool, nullptr);
for (auto framebuffer : swapChainFramebuffers) {
vkDestroyFramebuffer(device, framebuffer, nullptr);
}
for (auto imageView : swapChainImageViews) {
vkDestroyImageView(device, imageView, nullptr);
}
vkDestroyPipeline(device, graphicsPipeline, nullptr);
vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
vkDestroyBuffer(device, uniformBuffer, nullptr);
vkFreeMemory(device, uniformBufferMemory, nullptr);
vkDestroyRenderPass(device, renderPass, nullptr);
vkDestroySwapchainKHR(device, swapChain, nullptr);
checkCudaErrors(cudaDestroyExternalMemory(cudaExtMemVertexBuffer));
vkDestroyBuffer(device, vertexBuffer, nullptr);
vkFreeMemory(device, vertexBufferMemory, nullptr);
vkDestroyDescriptorPool(device, descriptorPool, nullptr);
vkDestroyDevice(device, nullptr);
vkDestroySurfaceKHR(instance, surface, nullptr);
vkDestroyInstance(instance, nullptr);
glfwDestroyWindow(window);
glfwTerminate();
}
};
int main(int argc, char* argv[]) {
execution_path = argv[0];
vulkanCudaApp app;
try {
app.run();
} catch (const std::runtime_error& e) {
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
2943ef9c951cae2734d2342f50c44439f950d722.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "BilinearResampleSubImageKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
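// Benchmark driver: argv[1] selects how many of the preset matrix sizes to run;
// for each size and each of the 20 block shapes the grid is rounded up to a
// whole number of blocks, the kernel is launched once plus 10 warm-up
// iterations, and then 1000 back-to-back launches are timed and reported in
// microseconds.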
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE*sizeof(float));
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE*sizeof(float));
float *subImageDefs = NULL;
hipMalloc(&subImageDefs, XSIZE*YSIZE*sizeof(float));
bool safeBounds = 1;
int inputWidth = XSIZE;
int inputHeight = YSIZE;
int outputWidth = XSIZE;
int outputHeight = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((BilinearResampleSubImageKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, subImageDefs, safeBounds, inputWidth, inputHeight, outputWidth, outputHeight);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((BilinearResampleSubImageKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, subImageDefs, safeBounds, inputWidth, inputHeight, outputWidth, outputHeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((BilinearResampleSubImageKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, subImageDefs, safeBounds, inputWidth, inputHeight, outputWidth, outputHeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
2943ef9c951cae2734d2342f50c44439f950d722.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "BilinearResampleSubImageKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE*sizeof(float));
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE*sizeof(float));
float *subImageDefs = NULL;
cudaMalloc(&subImageDefs, XSIZE*YSIZE*sizeof(float));
bool safeBounds = 1;
int inputWidth = XSIZE;
int inputHeight = YSIZE;
int outputWidth = XSIZE;
int outputHeight = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
BilinearResampleSubImageKernel<<<gridBlock,threadBlock>>>(input,output,subImageDefs,safeBounds,inputWidth,inputHeight,outputWidth,outputHeight);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
BilinearResampleSubImageKernel<<<gridBlock,threadBlock>>>(input,output,subImageDefs,safeBounds,inputWidth,inputHeight,outputWidth,outputHeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
BilinearResampleSubImageKernel<<<gridBlock,threadBlock>>>(input,output,subImageDefs,safeBounds,inputWidth,inputHeight,outputWidth,outputHeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
40d961af510879691348aeaf2065bb4a16795c57.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
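// GPU forward pass: zero each layer's delta, run its forward_gpu, and feed its
// output_gpu to the next layer as input.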
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
//if(l.c ==3 && i > 5) state.input = *net.input_gpu;
l.forward_gpu(l, state);
state.input = l.output_gpu;
if(l.truth) state.truth = l.output_gpu;
}
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
}
}
}
void harmless_update_network_gpu(network net)
{
net.learning_rate = 0;
net.momentum = 1;
update_network_gpu(net);
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
if(!net.notruth) *net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
if(!net.notruth) cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
#endif
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
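// Multi-GPU weight sync for layer j: pull the layer's weights from every
// replica, accumulate them into nets[0]'s copy, scale by 1/n, and push the
// averaged weights back out to all replicas.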
void sync_layer(network *nets, int n, int j)
{
////printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
////printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
#else
void sync_nets(network *nets, int n, int interval)
{
error("Sync_nets unsupported in Windows");
}
#endif
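// Multi-GPU training step: split the batch across n networks, train each in its
// own thread, sum the per-replica errors, periodically average the weights with
// sync_nets, and return the mean error.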
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
////printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
//printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
//printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
#else
error("Training for GPUs > 1 not supported in Windows...");
return 0.f;
#endif
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
cuda_free(state.input);
return out;
}
|
40d961af510879691348aeaf2065bb4a16795c57.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
void forward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
//if(l.c ==3 && i > 5) state.input = *net.input_gpu;
l.forward_gpu(l, state);
state.input = l.output_gpu;
if(l.truth) state.truth = l.output_gpu;
}
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if(l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
}
}
}
void harmless_update_network_gpu(network net)
{
net.learning_rate = 0;
net.momentum = 1;
update_network_gpu(net);
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
if(!net.notruth) *net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
if(!net.notruth) cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
forward_network_gpu(net, state);
backward_network_gpu(net, state);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
#endif
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
////printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
////printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
#else
void sync_nets(network *nets, int n, int interval)
{
error("Sync_nets unsupported in Windows");
}
#endif
float train_networks(network *nets, int n, data d, int interval)
{
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
#if defined __linux__ || defined __APPLE__ || defined PTHREAD_WINDOWS
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
////printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
//printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
//printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
#else
error("Training for GPUs > 1 not supported in Windows...");
return 0.f;
#endif
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
state.input = cuda_make_array(input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
cuda_free(state.input);
return out;
}
|
ProjCoreCuda.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ProjHelperFun.h"
#include "ProjCoreCUDACores.cu.h"
#include "CudaUtilProj.cu.h"
#include "Constants.h"
// CUDA error checking macros; taken from http://choorucode.com/2011/03/02/how-to-do-error-checking-in-cuda/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
void updateParams(const unsigned g, const REAL alpha, const REAL beta, const REAL nu, PrivGlobs& globs)
{
for(unsigned i=0;i<globs.myX.size();++i)
for(unsigned j=0;j<globs.myY.size();++j) {
globs.myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
globs.myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
if (i == 52 && j == 253) {
printf("non-cuda: %.10f %.10f %.10f\n", globs.myX[i], globs.myY[j], globs.myTimeline[g]);
printf("non-cuda: %.10f %.10f\n", log(globs.myX[i]), globs.myVarY[i][j]);
}
}
}
void setPayoff(const REAL strike, PrivGlobs& globs )
{
for(unsigned i=0;i<globs.myX.size();++i)
{
REAL payoff = max(globs.myX[i]-strike, (REAL)0.0);
for(unsigned j=0;j<globs.myY.size();++j)
globs.myResult[i][j] = payoff;
}
}
/*
inline void new_amazing_tridag(
const vector<REAL>& a, // size [n]
const vector<REAL>& b, // size [n]
const vector<REAL>& c, // size [n]
const vector<REAL>& r, // size [n]
const int n,
vector<REAL>& u, // size [n]
vector<REAL>& uu // size [n] temporary
) {
unsigned int block_size = 128;
// generate S_i matrices
// TODO
// compute S_i * S_(i-1) * S_1 matrices
scanInc<Mat2Mult, matrix>(block_size, n, d_s, d_tmpmat);
}
*/
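// GPU port of the pricing core: build the grid, the Dxx/Dyy operators and the
// payoff on the device, then march backwards in time (j = numT-2 .. 0) running
// updateParams and the explicit x/y rollback kernels; the DO_DEBUG blocks
// cross-check intermediate results against the sequential PrivGlobs code.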
void run_cuda(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
// grid
//REAL myX[numX];
//REAL myY[numY];
//REAL myTimeline[numT];
unsigned int myXindex;
unsigned int myYindex;
// variable
//REAL myResult[outer][numX][numY];
// coeffs
//REAL myVarX[outer][numX][numY];
//REAL myVarY[outer][numX][numY];
// operators
//REAL myDxx[numX][4];
//REAL myDyy[numY][4];
#if DO_DEBUG
#define TDIMX 9
#define TDIMY 20
#define TDIMZ 10
REAL testMatrix[TDIMX][TDIMY][TDIMZ];
REAL expectedMatrix[TDIMX][TDIMZ][TDIMY];
REAL testMatrix_t[TDIMX][TDIMZ][TDIMY] = {0};
int cnt = 1;
for (int i = 0; i < TDIMX; i++) {
for (int j = 0; j < TDIMY; j++) {
for (int k = 0; k < TDIMZ; k++) {
testMatrix[i][j][k] = cnt;
expectedMatrix[i][k][j] = cnt;
cnt += 1;
}}}
REAL *testMatrix_d, *testMatrix_t_d;
CudaSafeCall( hipMalloc( (void **) &testMatrix_d, TDIMX*TDIMY*TDIMZ*sizeof(REAL) ) );
CudaSafeCall( hipMalloc( (void **) &testMatrix_t_d, TDIMX*TDIMZ*TDIMY*sizeof(REAL) ) );
hipMemcpy(testMatrix_d, testMatrix, TDIMX*TDIMZ*TDIMY*sizeof(REAL), hipMemcpyHostToDevice);
transpose3d(testMatrix_d, testMatrix_t_d, TDIMX, TDIMY, TDIMZ);
hipMemcpy(testMatrix_t, testMatrix_t_d, TDIMX*TDIMZ*TDIMY*sizeof(REAL), hipMemcpyDeviceToHost);
printf("Got:\n");
for (int i = 0; i < TDIMX; i++) {
for (int j = 0; j < TDIMZ; j++) {
for (int k = 0; k < TDIMY; k++) {
printf("%.0f ", testMatrix_t[i][j][k]);
}
printf("\n");
}
printf("\n");
}
printf("Expected:\n");
for (int i = 0; i < TDIMX; i++) {
for (int j = 0; j < TDIMZ; j++) {
for (int k = 0; k < TDIMY; k++) {
printf("%.0f ", expectedMatrix[i][j][k]);
}
printf("\n");
}
printf("\n");
}
printf("Matrix verificeret\n");
#endif
const REAL stdX = 20.0*alpha*s0*sqrt(t);
const REAL dx = stdX/numX;
myXindex = static_cast<unsigned>(s0/dx) % numX;
const REAL stdY = 10.0*nu*sqrt(t);
const REAL dy = stdY/numY;
const REAL logAlpha = log(alpha);
myYindex = static_cast<unsigned>(numY/2.0);
unsigned int numZ = max(numX, numY);
// Allocate CUDA resources
REAL *myX_d, *myY_d, *myTimeline_d, *myDxx_d, *myDyy_d, *myResult_d, *myVarX_d, *myVarY_d, *res_d;
CudaSafeCall( hipMalloc((void **) &myX_d, numX * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myY_d, numY * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myTimeline_d, numT * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myDxx_d, numX * 4 * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myDyy_d, numY * 4 * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myResult_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myVarX_d, numX * numY * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myVarY_d, numX * numY * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &res_d, outer * sizeof(REAL)) );
// Allocate transposed resources
REAL *myDxx_t_d, *myDyy_t_d, *myResult_t_d;
CudaSafeCall( hipMalloc((void **) &myDxx_t_d, numX * 4 * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myDyy_t_d, numY * 4 * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &myResult_t_d, outer * numZ * numZ * sizeof(REAL)) );
#define TILE2 32
#define TILE3 8
const dim3 block_size2 = dim3(TILE2, TILE2);
const dim3 block_size3 = dim3(TILE3, TILE3, TILE3);
const int block_size = block_size2.x * block_size2.y * block_size2.z;
#define GRID(first,second) dim3(ceil((REAL)(first)/block_size2.x), ceil((REAL)(second)/block_size2.y))
#define GRID3(first,second,third) dim3(ceil((REAL)(first)/block_size3.x), ceil((REAL)(second)/block_size3.y), ceil((REAL)(third)/block_size3.z))
//CudaSafeCall(hipMemcpy(myX_d, myX, outer * numX * sizeof(REAL), hipMemcpyHostToDevice));
//CudaSafeCall(hipMemcpy(myY_d, myY, outer * numY * sizeof(REAL), hipMemcpyHostToDevice));
//CudaSafeCall(hipMemcpy(myTimeline_d, myTimeline, outer * numT * sizeof(REAL), hipMemcpyHostToDevice));
unsigned int maxXYT = max(numX, max(numY, numT));
hipLaunchKernelGGL(( initGrid_kernel), dim3(ceil((REAL)maxXYT/block_size)), dim3(block_size), 0, 0, s0, logAlpha, dx, dy, myXindex, myYindex, t,
numX, numY, numT, myTimeline_d, myX_d, myY_d); // 1D
CudaCheckError();
#if DO_DEBUG
PrivGlobs globs(numX, numY, numT);
initGrid(s0, alpha, nu, t, numX, numY, numT, globs);
REAL *myX, *myY, *myTimeline;
myX = (REAL*) malloc(numX * sizeof(REAL));
myY = (REAL*) malloc(numY * sizeof(REAL));
myTimeline = (REAL*) malloc(numT * sizeof(REAL));
hipMemcpy(myX, myX_d, numX * sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(myY, myY_d, numY * sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(myTimeline, myTimeline_d, numT * sizeof(REAL), hipMemcpyDeviceToHost);
for (int x = 0; x < numX; ++x) {
REAL x1 = globs.myX[x];
REAL x2 = myX[x];
if (abs(x1-x2) >= 1e-7) {
printf("myX(%d), %.14f, %.14f, %.14f\n", x, abs(x1-x2), x1, x2);
}
}
for (int i = 0; i < numX; i++) {
globs.myX[i] = myX[i];
}
for (int i = 0; i < numY; i++) {
globs.myY[i] = myY[i];
}
for (int i = 0; i < numT; i++) {
globs.myTimeline[i] = myTimeline[i];
}
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
#endif
hipLaunchKernelGGL(( initOperator_kernel), dim3(ceil((REAL)numX/block_size)), dim3(block_size), 0, 0, myX_d, myDxx_t_d, numX); // 1D
CudaCheckError();
hipLaunchKernelGGL(( initOperator_kernel), dim3(ceil((REAL)numY/block_size)), dim3(block_size), 0, 0, myY_d, myDyy_t_d, numY); // 1D
CudaCheckError();
#if DO_DEBUG
transpose<REAL,32>(myDxx_t_d, myDxx_d, 4, numX);
transpose<REAL,32>(myDyy_t_d, myDyy_d, 4, numY);
REAL *myDxx;
myDxx = (REAL*) malloc(numX * 4 * sizeof(REAL));
hipMemcpy(myDxx, myDxx_d, numX * 4 * sizeof(REAL), hipMemcpyDeviceToHost);
for (int x = 0; x < numX; ++x) {
for (int i = 0; i < 4; ++i) {
REAL x1 = globs.myDxx[x][i];
REAL x2 = myDxx[IDX2(numX,4, x,i)];
if (abs(x1-x2) >= 1e-10) {
printf("myDxx(%d,%d), %.14f, %.14f, %.14f\n", x, i, abs(x1-x2), x1, x2);
}
}
}
printf("Initoperator checked.\n");
#endif
hipLaunchKernelGGL(( setPayoff_kernel), dim3(GRID(numY, numX)), dim3(block_size2), 0, 0, myX_d, myY_d, myResult_d, numX, numY, numZ, outer); // 2D
CudaCheckError();
#if DO_DEBUG
REAL *myResult;
myResult = (REAL*) malloc(outer * numZ * numZ * sizeof(REAL));
hipMemcpy(myResult, myResult_d, outer * numZ * numZ * sizeof(REAL), hipMemcpyDeviceToHost);
CudaCheckError();
setPayoff(0.001 * 7, globs);
for (int x = 0; x < numX; ++x) {
for (int y = 0; y < numY; ++y) {
REAL x1 = globs.myResult[x][y];
REAL x2 = myResult[IDX3(outer,numZ,numZ, 7,x,y)];
if (abs(x1-x2) >= 1e-10) {
printf("myResult(%d,%d,%d), %.14f, %.14f, %.14f\n", 7, x, y, abs(x1-x2), x1, x2);
}
}
}
printf("setPayoff checked.\n");
#endif
// stuff
//REAL u[outer][numY][numX];
//REAL v[outer][numX][numY];
//REAL a[outer][numZ][numZ];
//REAL b[outer][numZ][numZ];
//REAL c[outer][numZ][numZ];
//REAL y[outer][numZ][numZ];
//REAL yy[outer][numZ][numZ];
REAL *u_d, *v_d, *a_d, *b_d, *c_d, *y_d, *yy_d;
CudaSafeCall( hipMalloc((void **) &u_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &v_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &a_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &b_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &c_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &y_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &yy_d, outer * numZ * numZ * sizeof(REAL)) );
// Transposed
REAL *u_t_d, *a_t_d, *b_t_d, *c_t_d, *y_t_d;
CudaSafeCall( hipMalloc((void **) &u_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &a_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &b_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &c_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( hipMalloc((void **) &y_t_d, outer * numZ * numZ * sizeof(REAL)) );
for (int j = numT-2; j>=0; --j) {
#if DO_DEBUG
hipDeviceSynchronize();
printf("time step %d\n", j);
#endif
hipLaunchKernelGGL(( updateParams_large_kernel), dim3(GRID(numY, numX)), dim3(block_size2), 0, 0, j, alpha, beta, nu, numX, numY,
numT, myX_d, myY_d, myVarX_d, myVarY_d, myTimeline_d); // 2D
CudaCheckError();
#if DO_DEBUG
if (j == numT-2) {
REAL *myVarX, *myVarY;
myVarX = (REAL*) malloc(numX * numY * sizeof(REAL));
myVarY = (REAL*) malloc(numX * numY * sizeof(REAL));
hipMemcpy(myVarX, myVarX_d, numX * numY * sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(myVarY, myVarY_d, numX * numY * sizeof(REAL), hipMemcpyDeviceToHost);
CudaCheckError();
updateParams(j, alpha, beta, nu, globs);
for (int x = 52; x < 53; ++x) {
for (int y = 253; y < 254; ++y) {
REAL x1 = globs.myVarX[x][y];
REAL x2 = myVarX[IDX2(numX,numY, x,y)];
if (abs(x1-x2) >= 1e-7) {
printf("myVarX(%d,%d), %.14f, %.14f, %.14f\n", x, y, abs(x1-x2), x1, x2);
}
x1 = globs.myVarY[x][y];
x2 = myVarY[IDX2(numX,numY, x,y)];
if (abs(x1-x2) >= 1e-7) {
printf("myVarY(%d,%d), %.14f, %.14f, %.14f\n", x, y, abs(x1-x2), x1, x2);
}
}
}
printf("updateParams checked.\n");
}
#endif
#ifdef DO_DEBUG_2
REAL *u2, *u;
u = (REAL *) malloc(outer * numZ * numZ * sizeof(REAL));
u2 = (REAL *) malloc(outer * numZ * numZ * sizeof(REAL));
#endif
hipLaunchKernelGGL(( rollback_explicit_x_kernel), dim3(GRID(numY, numX)), dim3(block_size2), 0, 0, outer, numX, numY, numT, numZ, j, u_t_d,
myTimeline_d, myVarX_d, myDxx_t_d, myResult_d); // 2D
CudaCheckError();
#ifdef DO_DEBUG_2
hipMemcpy(u2, u_t_d, outer * numZ * numZ * sizeof(REAL), hipMemcpyDeviceToHost);
printf("u[0][20][40] = %.12f\n", u2[IDX3(outer,numZ,numZ,0,40,20)]);
#endif
hipLaunchKernelGGL(( rollback_explicit_y_kernel), dim3(GRID(numY, numX)), dim3(block_size2), 0, 0, outer, numX, numY, numZ, u_t_d, v_d,
myTimeline_d, myVarY_d, myDyy_t_d, myResult_d); // 2D
CudaCheckError();
#ifdef DO_DEBUG_2
REAL *u2_d;
CudaSafeCall( hipMalloc((void **) &u2_d, outer * numZ * numZ * sizeof(REAL)) );
transpose3d(u_t_d, u2_d, outer, numZ, numZ);
hipMemcpy(u2, u2_d, outer * numZ * numZ * sizeof(REAL), hipMemcpyDeviceToHost);
printf("u[0][20][40] = %.12f\n", u2[IDX3(outer,numZ,numZ,0,20,40)]);
#endif
hipLaunchKernelGGL(( rollback_implicit_x_kernel), dim3(GRID(numY, numX)), dim3(block_size2), 0, 0, outer, numX, numY, numZ, numT, j,
myTimeline_d, myVarX_d, myDxx_t_d, a_d, b_d, c_d);
CudaCheckError();
hipLaunchKernelGGL(( rollback_implicit_x_part2_kernel), dim3(GRID(outer, numY)), dim3(block_size2), 0, 0, outer, numX, numY, numZ, u_t_d,
a_d, b_d, c_d, yy_d); // 2D
CudaCheckError();
#ifdef DO_DEBUG_2
hipLaunchKernelGGL(( rollback_implicit_x_old_kernel), dim3(GRID(outer, numY)), dim3(block_size2), 0, 0,
outer, numX, numY, numZ, numT, j, myTimeline_d, myVarX_d, myDxx_d, u2_d,
a_d, b_d, c_d, yy_d
);
hipMemcpy(u , u_t_d , outer * numZ * numZ * sizeof(REAL), hipMemcpyDeviceToHost);
hipMemcpy(u2, u2_d, outer * numZ * numZ * sizeof(REAL), hipMemcpyDeviceToHost);
for (int x = 0; x < outer; x++) {
for (int y = 0; y < numZ; y++) {
for (int z = 0; z < numZ; z++) {
REAL x1 = u[IDX3(outer,numZ,numZ, x,z,y)];
REAL x2 = u2[IDX3(outer,numZ,numZ, x,y,z)];
if (abs(x1-x2) >= 1e-10) {
printf("u(%d,%d,%d), %.14f, ex %.14f, got %.14f\n", x, y, z, abs(x1-x2), x2, x1);
}
}
}
}
free(u2);
free(u);
hipFree(u2_d);
#endif
hipLaunchKernelGGL(( rollback_implicit_y_kernel), dim3(GRID(numY, numX)), dim3(block_size2), 0, 0, outer, numX, numY, numZ, numT, j,
myTimeline_d, myVarY_d, myDyy_t_d, myResult_d, u_t_d, v_d, a_d, b_d, c_d, y_d); // 2D
CudaCheckError();
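// The buffers below are transposed before the second implicit-y pass, presumably so that
// rollback_implicit_y_part2_kernel reads along the contiguous dimension (an interpretation of the
// original code, not a documented guarantee); myResult is transposed back once the pass finishes.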
transpose3d(myResult_d, myResult_t_d, outer, numZ, numZ);
transpose3d(a_d, a_t_d, outer, numZ, numZ);
transpose3d(b_d, b_t_d, outer, numZ, numZ);
transpose3d(c_d, c_t_d, outer, numZ, numZ);
transpose3d(y_d, y_t_d, outer, numZ, numZ);
hipLaunchKernelGGL(( rollback_implicit_y_part2_kernel), dim3(GRID(outer, numX)), dim3(block_size2), 0, 0, outer, numX, numY, numZ, numT, j, myTimeline_d,
myVarY_d, myDyy_d, myResult_t_d, u_t_d, v_d, a_t_d,
b_t_d, c_t_d, y_t_d, yy_d); // 2D
CudaCheckError();
transpose3d(myResult_t_d, myResult_d, outer, numZ, numZ);
}
hipLaunchKernelGGL(( res_kernel), dim3(ceil((REAL)outer/block_size)), dim3(block_size), 0, 0, res_d, myResult_d, outer, numX, numY, numZ, myXindex, myYindex);
hipDeviceSynchronize();
hipMemcpy(res, res_d, outer * sizeof(REAL), hipMemcpyDeviceToHost);
// XXX: free everything maybe
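// A minimal cleanup sketch for the device buffers allocated above (an assumption that none of
// them are reused by the caller; the original code leaves this as a TODO):
hipFree(myX_d); hipFree(myY_d); hipFree(myTimeline_d);
hipFree(myDxx_d); hipFree(myDyy_d); hipFree(myResult_d);
hipFree(myVarX_d); hipFree(myVarY_d); hipFree(res_d);
hipFree(myDxx_t_d); hipFree(myDyy_t_d); hipFree(myResult_t_d);
hipFree(u_d); hipFree(v_d); hipFree(a_d); hipFree(b_d); hipFree(c_d); hipFree(y_d); hipFree(yy_d);
hipFree(u_t_d); hipFree(a_t_d); hipFree(b_t_d); hipFree(c_t_d); hipFree(y_t_d);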
}
//#endif // PROJ_CORE_ORIG
|
ProjCoreCuda.cu
|
#include "ProjHelperFun.h"
#include "ProjCoreCUDACores.cu.h"
#include "CudaUtilProj.cu.h"
#include "Constants.h"
// CUDA error checking macros; taken from http://choorucode.com/2011/03/02/how-to-do-error-checking-in-cuda/
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
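// Typical usage, as done throughout this file: wrap allocations and copies in
// CudaSafeCall( cudaMalloc(...) ) / CudaSafeCall( cudaMemcpy(...) ), and call
// CudaCheckError() right after every kernel launch.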
void updateParams(const unsigned g, const REAL alpha, const REAL beta, const REAL nu, PrivGlobs& globs)
{
for(unsigned i=0;i<globs.myX.size();++i)
for(unsigned j=0;j<globs.myY.size();++j) {
globs.myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
globs.myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
if (i == 52 && j == 253) {
printf("non-cuda: %.10f %.10f %.10f\n", globs.myX[i], globs.myY[j], globs.myTimeline[g]);
printf("non-cuda: %.10f %.10f\n", log(globs.myX[i]), globs.myVarY[i][j]);
}
}
}
void setPayoff(const REAL strike, PrivGlobs& globs )
{
for(unsigned i=0;i<globs.myX.size();++i)
{
REAL payoff = max(globs.myX[i]-strike, (REAL)0.0);
for(unsigned j=0;j<globs.myY.size();++j)
globs.myResult[i][j] = payoff;
}
}
/*
inline void new_amazing_tridag(
const vector<REAL>& a, // size [n]
const vector<REAL>& b, // size [n]
const vector<REAL>& c, // size [n]
const vector<REAL>& r, // size [n]
const int n,
vector<REAL>& u, // size [n]
vector<REAL>& uu // size [n] temporary
) {
unsigned int block_size = 128;
// generate S_i matrices
// TODO
// compute S_i * S_(i-1) * S_1 matrices
scanInc<Mat2Mult, matrix>(block_size, n, d_s, d_tmpmat);
}
*/
void run_cuda(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
// grid
//REAL myX[numX];
//REAL myY[numY];
//REAL myTimeline[numT];
unsigned int myXindex;
unsigned int myYindex;
// variable
//REAL myResult[outer][numX][numY];
// coeffs
//REAL myVarX[outer][numX][numY];
//REAL myVarY[outer][numX][numY];
// operators
//REAL myDxx[numX][4];
//REAL myDyy[numY][4];
#if DO_DEBUG
#define TDIMX 9
#define TDIMY 20
#define TDIMZ 10
REAL testMatrix[TDIMX][TDIMY][TDIMZ];
REAL expectedMatrix[TDIMX][TDIMZ][TDIMY];
REAL testMatrix_t[TDIMX][TDIMZ][TDIMY] = {0};
int cnt = 1;
for (int i = 0; i < TDIMX; i++) {
for (int j = 0; j < TDIMY; j++) {
for (int k = 0; k < TDIMZ; k++) {
testMatrix[i][j][k] = cnt;
expectedMatrix[i][k][j] = cnt;
cnt += 1;
}}}
REAL *testMatrix_d, *testMatrix_t_d;
CudaSafeCall( cudaMalloc( (void **) &testMatrix_d, TDIMX*TDIMY*TDIMZ*sizeof(REAL) ) );
CudaSafeCall( cudaMalloc( (void **) &testMatrix_t_d, TDIMX*TDIMZ*TDIMY*sizeof(REAL) ) );
cudaMemcpy(testMatrix_d, testMatrix, TDIMX*TDIMZ*TDIMY*sizeof(REAL), cudaMemcpyHostToDevice);
transpose3d(testMatrix_d, testMatrix_t_d, TDIMX, TDIMY, TDIMZ);
cudaMemcpy(testMatrix_t, testMatrix_t_d, TDIMX*TDIMZ*TDIMY*sizeof(REAL), cudaMemcpyDeviceToHost);
printf("Got:\n");
for (int i = 0; i < TDIMX; i++) {
for (int j = 0; j < TDIMZ; j++) {
for (int k = 0; k < TDIMY; k++) {
printf("%.0f ", testMatrix_t[i][j][k]);
}
printf("\n");
}
printf("\n");
}
printf("Expected:\n");
for (int i = 0; i < TDIMX; i++) {
for (int j = 0; j < TDIMZ; j++) {
for (int k = 0; k < TDIMY; k++) {
printf("%.0f ", expectedMatrix[i][j][k]);
}
printf("\n");
}
printf("\n");
}
printf("Matrix verificeret\n");
#endif
const REAL stdX = 20.0*alpha*s0*sqrt(t);
const REAL dx = stdX/numX;
myXindex = static_cast<unsigned>(s0/dx) % numX;
const REAL stdY = 10.0*nu*sqrt(t);
const REAL dy = stdY/numY;
const REAL logAlpha = log(alpha);
myYindex = static_cast<unsigned>(numY/2.0);
unsigned int numZ = max(numX, numY);
// Allocate CUDA resources
REAL *myX_d, *myY_d, *myTimeline_d, *myDxx_d, *myDyy_d, *myResult_d, *myVarX_d, *myVarY_d, *res_d;
CudaSafeCall( cudaMalloc((void **) &myX_d, numX * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myY_d, numY * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myTimeline_d, numT * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myDxx_d, numX * 4 * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myDyy_d, numY * 4 * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myResult_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myVarX_d, numX * numY * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myVarY_d, numX * numY * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &res_d, outer * sizeof(REAL)) );
// Allocate transposed resources
REAL *myDxx_t_d, *myDyy_t_d, *myResult_t_d;
CudaSafeCall( cudaMalloc((void **) &myDxx_t_d, numX * 4 * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myDyy_t_d, numY * 4 * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &myResult_t_d, outer * numZ * numZ * sizeof(REAL)) );
#define TILE2 32
#define TILE3 8
const dim3 block_size2 = dim3(TILE2, TILE2);
const dim3 block_size3 = dim3(TILE3, TILE3, TILE3);
const int block_size = block_size2.x * block_size2.y * block_size2.z;
#define GRID(first,second) dim3(ceil((REAL)(first)/block_size2.x), ceil((REAL)(second)/block_size2.y))
#define GRID3(first,second,third) dim3(ceil((REAL)(first)/block_size3.x), ceil((REAL)(second)/block_size3.y), ceil((REAL)(third)/block_size3.z))
//CudaSafeCall(cudaMemcpy(myX_d, myX, outer * numX * sizeof(REAL), cudaMemcpyHostToDevice));
//CudaSafeCall(cudaMemcpy(myY_d, myY, outer * numY * sizeof(REAL), cudaMemcpyHostToDevice));
//CudaSafeCall(cudaMemcpy(myTimeline_d, myTimeline, outer * numT * sizeof(REAL), cudaMemcpyHostToDevice));
unsigned int maxXYT = max(numX, max(numY, numT));
initGrid_kernel<<<ceil((REAL)maxXYT/block_size), block_size>>>(s0, logAlpha, dx, dy, myXindex, myYindex, t,
numX, numY, numT, myTimeline_d, myX_d, myY_d); // 1D
CudaCheckError();
#if DO_DEBUG
PrivGlobs globs(numX, numY, numT);
initGrid(s0, alpha, nu, t, numX, numY, numT, globs);
REAL *myX, *myY, *myTimeline;
myX = (REAL*) malloc(numX * sizeof(REAL));
myY = (REAL*) malloc(numY * sizeof(REAL));
myTimeline = (REAL*) malloc(numT * sizeof(REAL));
cudaMemcpy(myX, myX_d, numX * sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(myY, myY_d, numY * sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(myTimeline, myTimeline_d, numT * sizeof(REAL), cudaMemcpyDeviceToHost);
for (int x = 0; x < numX; ++x) {
REAL x1 = globs.myX[x];
REAL x2 = myX[x];
if (abs(x1-x2) >= 1e-7) {
printf("myX(%d), %.14f, %.14f, %.14f\n", x, abs(x1-x2), x1, x2);
}
}
for (int i = 0; i < numX; i++) {
globs.myX[i] = myX[i];
}
for (int i = 0; i < numY; i++) {
globs.myY[i] = myY[i];
}
for (int i = 0; i < numT; i++) {
globs.myTimeline[i] = myTimeline[i];
}
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
#endif
initOperator_kernel<<<ceil((REAL)numX/block_size), block_size>>>(myX_d, myDxx_t_d, numX); // 1D
CudaCheckError();
initOperator_kernel<<<ceil((REAL)numY/block_size), block_size>>>(myY_d, myDyy_t_d, numY); // 1D
CudaCheckError();
#if DO_DEBUG
transpose<REAL,32>(myDxx_t_d, myDxx_d, 4, numX);
transpose<REAL,32>(myDyy_t_d, myDyy_d, 4, numY);
REAL *myDxx;
myDxx = (REAL*) malloc(numX * 4 * sizeof(REAL));
cudaMemcpy(myDxx, myDxx_d, numX * 4 * sizeof(REAL), cudaMemcpyDeviceToHost);
for (int x = 0; x < numX; ++x) {
for (int i = 0; i < 4; ++i) {
REAL x1 = globs.myDxx[x][i];
REAL x2 = myDxx[IDX2(numX,4, x,i)];
if (abs(x1-x2) >= 1e-10) {
printf("myDxx(%d,%d), %.14f, %.14f, %.14f\n", x, i, abs(x1-x2), x1, x2);
}
}
}
printf("Initoperator checked.\n");
#endif
setPayoff_kernel<<<GRID(numY, numX), block_size2>>>(myX_d, myY_d, myResult_d, numX, numY, numZ, outer); // 2D
CudaCheckError();
#if DO_DEBUG
REAL *myResult;
myResult = (REAL*) malloc(outer * numZ * numZ * sizeof(REAL));
cudaMemcpy(myResult, myResult_d, outer * numZ * numZ * sizeof(REAL), cudaMemcpyDeviceToHost);
CudaCheckError();
setPayoff(0.001 * 7, globs);
for (int x = 0; x < numX; ++x) {
for (int y = 0; y < numY; ++y) {
REAL x1 = globs.myResult[x][y];
REAL x2 = myResult[IDX3(outer,numZ,numZ, 7,x,y)];
if (abs(x1-x2) >= 1e-10) {
printf("myResult(%d,%d,%d), %.14f, %.14f, %.14f\n", 7, x, y, abs(x1-x2), x1, x2);
}
}
}
printf("setPayoff checked.\n");
#endif
// stuff
//REAL u[outer][numY][numX];
//REAL v[outer][numX][numY];
//REAL a[outer][numZ][numZ];
//REAL b[outer][numZ][numZ];
//REAL c[outer][numZ][numZ];
//REAL y[outer][numZ][numZ];
//REAL yy[outer][numZ][numZ];
REAL *u_d, *v_d, *a_d, *b_d, *c_d, *y_d, *yy_d;
CudaSafeCall( cudaMalloc((void **) &u_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &v_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &a_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &b_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &c_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &y_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &yy_d, outer * numZ * numZ * sizeof(REAL)) );
// Transposed
REAL *u_t_d, *a_t_d, *b_t_d, *c_t_d, *y_t_d;
CudaSafeCall( cudaMalloc((void **) &u_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &a_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &b_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &c_t_d, outer * numZ * numZ * sizeof(REAL)) );
CudaSafeCall( cudaMalloc((void **) &y_t_d, outer * numZ * numZ * sizeof(REAL)) );
for (int j = numT-2; j>=0; --j) {
#if DO_DEBUG
cudaDeviceSynchronize();
printf("time step %d\n", j);
#endif
updateParams_large_kernel<<<GRID(numY, numX), block_size2>>>(j, alpha, beta, nu, numX, numY,
numT, myX_d, myY_d, myVarX_d, myVarY_d, myTimeline_d); // 2D
CudaCheckError();
#if DO_DEBUG
if (j == numT-2) {
REAL *myVarX, *myVarY;
myVarX = (REAL*) malloc(numX * numY * sizeof(REAL));
myVarY = (REAL*) malloc(numX * numY * sizeof(REAL));
cudaMemcpy(myVarX, myVarX_d, numX * numY * sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(myVarY, myVarY_d, numX * numY * sizeof(REAL), cudaMemcpyDeviceToHost);
CudaCheckError();
updateParams(j, alpha, beta, nu, globs);
for (int x = 52; x < 53; ++x) {
for (int y = 253; y < 254; ++y) {
REAL x1 = globs.myVarX[x][y];
REAL x2 = myVarX[IDX2(numX,numY, x,y)];
if (abs(x1-x2) >= 1e-7) {
printf("myVarX(%d,%d), %.14f, %.14f, %.14f\n", x, y, abs(x1-x2), x1, x2);
}
x1 = globs.myVarY[x][y];
x2 = myVarY[IDX2(numX,numY, x,y)];
if (abs(x1-x2) >= 1e-7) {
printf("myVarY(%d,%d), %.14f, %.14f, %.14f\n", x, y, abs(x1-x2), x1, x2);
}
}
}
printf("updateParams checked.\n");
}
#endif
#ifdef DO_DEBUG_2
REAL *u2, *u;
u = (REAL *) malloc(outer * numZ * numZ * sizeof(REAL));
u2 = (REAL *) malloc(outer * numZ * numZ * sizeof(REAL));
#endif
rollback_explicit_x_kernel<<<GRID(numY, numX), block_size2>>>(outer, numX, numY, numT, numZ, j, u_t_d,
myTimeline_d, myVarX_d, myDxx_t_d, myResult_d); // 2D
CudaCheckError();
#ifdef DO_DEBUG_2
cudaMemcpy(u2, u_t_d, outer * numZ * numZ * sizeof(REAL), cudaMemcpyDeviceToHost);
printf("u[0][20][40] = %.12f\n", u2[IDX3(outer,numZ,numZ,0,40,20)]);
#endif
rollback_explicit_y_kernel<<<GRID(numY, numX), block_size2>>>(outer, numX, numY, numZ, u_t_d, v_d,
myTimeline_d, myVarY_d, myDyy_t_d, myResult_d); // 2D
CudaCheckError();
#ifdef DO_DEBUG_2
REAL *u2_d;
CudaSafeCall( cudaMalloc((void **) &u2_d, outer * numZ * numZ * sizeof(REAL)) );
transpose3d(u_t_d, u2_d, outer, numZ, numZ);
cudaMemcpy(u2, u2_d, outer * numZ * numZ * sizeof(REAL), cudaMemcpyDeviceToHost);
printf("u[0][20][40] = %.12f\n", u2[IDX3(outer,numZ,numZ,0,20,40)]);
#endif
rollback_implicit_x_kernel<<<GRID(numY, numX), block_size2>>>(outer, numX, numY, numZ, numT, j,
myTimeline_d, myVarX_d, myDxx_t_d, a_d, b_d, c_d);
CudaCheckError();
rollback_implicit_x_part2_kernel<<<GRID(outer, numY), block_size2>>>(outer, numX, numY, numZ, u_t_d,
a_d, b_d, c_d, yy_d); // 2D
CudaCheckError();
#ifdef DO_DEBUG_2
rollback_implicit_x_old_kernel<<<GRID(outer, numY), block_size2>>>(
outer, numX, numY, numZ, numT, j, myTimeline_d, myVarX_d, myDxx_d, u2_d,
a_d, b_d, c_d, yy_d
);
cudaMemcpy(u , u_t_d , outer * numZ * numZ * sizeof(REAL), cudaMemcpyDeviceToHost);
cudaMemcpy(u2, u2_d, outer * numZ * numZ * sizeof(REAL), cudaMemcpyDeviceToHost);
for (int x = 0; x < outer; x++) {
for (int y = 0; y < numZ; y++) {
for (int z = 0; z < numZ; z++) {
REAL x1 = u[IDX3(outer,numZ,numZ, x,z,y)];
REAL x2 = u2[IDX3(outer,numZ,numZ, x,y,z)];
if (abs(x1-x2) >= 1e-10) {
printf("u(%d,%d,%d), %.14f, ex %.14f, got %.14f\n", x, y, z, abs(x1-x2), x2, x1);
}
}
}
}
free(u2);
free(u);
cudaFree(u2_d);
#endif
rollback_implicit_y_kernel<<<GRID(numY, numX), block_size2>>>(outer, numX, numY, numZ, numT, j,
myTimeline_d, myVarY_d, myDyy_t_d, myResult_d, u_t_d, v_d, a_d, b_d, c_d, y_d); // 2D
CudaCheckError();
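// The buffers below are transposed before the second implicit-y pass, presumably so that
// rollback_implicit_y_part2_kernel reads along the contiguous dimension (an interpretation of the
// original code, not a documented guarantee); myResult is transposed back once the pass finishes.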
transpose3d(myResult_d, myResult_t_d, outer, numZ, numZ);
transpose3d(a_d, a_t_d, outer, numZ, numZ);
transpose3d(b_d, b_t_d, outer, numZ, numZ);
transpose3d(c_d, c_t_d, outer, numZ, numZ);
transpose3d(y_d, y_t_d, outer, numZ, numZ);
rollback_implicit_y_part2_kernel<<<GRID(outer, numX), block_size2>>>(outer, numX, numY, numZ, numT, j, myTimeline_d,
myVarY_d, myDyy_d, myResult_t_d, u_t_d, v_d, a_t_d,
b_t_d, c_t_d, y_t_d, yy_d); // 2D
CudaCheckError();
transpose3d(myResult_t_d, myResult_d, outer, numZ, numZ);
}
res_kernel<<<ceil((REAL)outer/block_size), block_size>>>(res_d, myResult_d, outer, numX, numY, numZ, myXindex, myYindex);
cudaDeviceSynchronize();
cudaMemcpy(res, res_d, outer * sizeof(REAL), cudaMemcpyDeviceToHost);
// XXX: free everything maybe
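// A minimal cleanup sketch for the device buffers allocated above (an assumption that none of
// them are reused by the caller; the original code leaves this as a TODO):
cudaFree(myX_d); cudaFree(myY_d); cudaFree(myTimeline_d);
cudaFree(myDxx_d); cudaFree(myDyy_d); cudaFree(myResult_d);
cudaFree(myVarX_d); cudaFree(myVarY_d); cudaFree(res_d);
cudaFree(myDxx_t_d); cudaFree(myDyy_t_d); cudaFree(myResult_t_d);
cudaFree(u_d); cudaFree(v_d); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaFree(y_d); cudaFree(yy_d);
cudaFree(u_t_d); cudaFree(a_t_d); cudaFree(b_t_d); cudaFree(c_t_d); cudaFree(y_t_d);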
}
//#endif // PROJ_CORE_ORIG
|
e4c9701bc2dbc3e7938da48d1debee4a26433b9d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LibraryCuda.cuh"
#include "KernelBox.h"
#include "Box.h"
#include "Tube.h"
namespace kernel {
namespace box {
__global__
void ContainsWrapper(const Vector3D<CudaFloat> dimensions,
TransMatrix<CudaFloat> const * const trans_matrix,
SOA3D<double> const points, bool *output) {
const int index = ThreadIndex();
if (index >= points.size()) return;
Vector3D<double> point = points[index];
Contains<kCuda>(dimensions, trans_matrix, DeviceVector(point), output[index]);
}
__global__
void DistanceToInWrapper(const Vector3D<CudaFloat> dimensions,
TransMatrix<CudaFloat> const * const trans_matrix,
SOA3D<double> const pos, SOA3D<double> const dir,
double const *step_max, double *distance) {
const int index = ThreadIndex();
if (index >= pos.size()) return;
CudaFloat dist;
DistanceToIn<kCuda>(dimensions, trans_matrix, DeviceVector(pos[index]),
DeviceVector(dir[index]), CudaFloat(step_max[index]),
dist);
distance[index] = double(dist);
}
} // End namespace box
} // End namespace kernel
void Box::Contains(SOA3D<double> const &points,
bool *output) const {
const LaunchParameters launch(points.size());
hipLaunchKernelGGL(( kernel::box::ContainsWrapper), dim3(launch.grid_size), dim3(launch.block_size), 0, 0,
DeviceVector(parameters->dimensions),
trans_matrix,
points,
output
);
CheckCudaError();
}
void Box::DistanceToIn(SOA3D<double> const &pos,
SOA3D<double> const &dir,
double const *step_max,
double *distance) const {
const LaunchParameters launch(pos.size());
hipLaunchKernelGGL(( kernel::box::DistanceToInWrapper), dim3(launch.grid_size), dim3(launch.block_size), 0, 0,
DeviceVector(parameters->dimensions),
trans_matrix,
pos,
dir,
step_max,
distance
);
CheckCudaError();
}
void Tube::Contains(SOA3D<double> const &points,
bool *output) const {
// NYI
}
void Tube::DistanceToIn(SOA3D<double> const &pos,
SOA3D<double> const &dir,
double const *steps_max,
double *distance) const {
// NYI
}
|
e4c9701bc2dbc3e7938da48d1debee4a26433b9d.cu
|
#include "LibraryCuda.cuh"
#include "KernelBox.h"
#include "Box.h"
#include "Tube.h"
namespace kernel {
namespace box {
__global__
void ContainsWrapper(const Vector3D<CudaFloat> dimensions,
TransMatrix<CudaFloat> const * const trans_matrix,
SOA3D<double> const points, bool *output) {
const int index = ThreadIndex();
if (index >= points.size()) return;
Vector3D<double> point = points[index];
Contains<kCuda>(dimensions, trans_matrix, DeviceVector(point), output[index]);
}
__global__
void DistanceToInWrapper(const Vector3D<CudaFloat> dimensions,
TransMatrix<CudaFloat> const * const trans_matrix,
SOA3D<double> const pos, SOA3D<double> const dir,
double const *step_max, double *distance) {
const int index = ThreadIndex();
if (index >= pos.size()) return;
CudaFloat dist;
DistanceToIn<kCuda>(dimensions, trans_matrix, DeviceVector(pos[index]),
DeviceVector(dir[index]), CudaFloat(step_max[index]),
dist);
distance[index] = double(dist);
}
} // End namespace box
} // End namespace kernel
void Box::Contains(SOA3D<double> const &points,
bool *output) const {
const LaunchParameters launch(points.size());
kernel::box::ContainsWrapper<<<launch.grid_size, launch.block_size>>>(
DeviceVector(parameters->dimensions),
trans_matrix,
points,
output
);
CheckCudaError();
}
void Box::DistanceToIn(SOA3D<double> const &pos,
SOA3D<double> const &dir,
double const *step_max,
double *distance) const {
const LaunchParameters launch(pos.size());
kernel::box::DistanceToInWrapper<<<launch.grid_size, launch.block_size>>>(
DeviceVector(parameters->dimensions),
trans_matrix,
pos,
dir,
step_max,
distance
);
CheckCudaError();
}
void Tube::Contains(SOA3D<double> const &points,
bool *output) const {
// NYI
}
void Tube::DistanceToIn(SOA3D<double> const &pos,
SOA3D<double> const &dir,
double const *steps_max,
double *distance) const {
// NYI
}
|
00c2bbdd81a91e97234bb8980b74c1ec37e2b0ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "float.h"
#include <hip/hip_runtime.h>
#define HILOS 128
#define PATH "./inputs/randomData_2M_3feature.csv"
#define CANT_FEATURES 3
#define CANT_MEANS 4
#define CANT_ITERACIONES 100
#define MAX_DOUBLE DBL_MAX
//Funciones CUDA
__global__ void kMeansClusterAssignment(double* items_dev, double* means_dev, int *clusterAsignado_dev,int *countChangeItem_dev );
__global__ void kMeansCentroidUpdate(double *items_dev, int *clusterAsignado_dev, double *means_dev, int *d_clust_sizes);
__device__ u_int64_t Classify(double* means_dev, double* item, int cant_means, int cant_features);
__device__ double distanciaEuclidiana(double* x , double* y, int length);
//Funciones HOST
double** CalculateMeans(double* items_dev, double** means, u_int64_t size_lines, int *clusterAsignado_dev, int nBloques, int hilosB);
double*** FindClusters(int *clusterAsignado_dev, u_int64_t cant_items, double **items);
u_int64_t CalcLines(char filename[50]);
double **alloc_2d_double(u_int64_t rows, u_int64_t cols);
double** ReadData(char filename[50], u_int64_t size_lines, u_int8_t cant_features);
void searchMinMax(double** items, u_int64_t size_lines, double* minimo, double* maximo, u_int8_t cant_features);
double** InitializeMeans(u_int16_t cant_means, double* cMin, double* cMax, u_int8_t cant_features);
__host__ void check_CUDA_Error(const char *mensaje);
//Constantes de CUDA
__constant__ u_int64_t CANT_ITEMS_CUDA;
int main()
{
//Declaracion de eventos para tomar tiempos
hipEvent_t start;
hipEvent_t stop;
//Creacion de eventos
hipEventCreate(&start);
hipEventCreate(&stop);
//Marca de inicio CalcLines y ReadData
hipEventRecord(start,0);
//Calcula la cantidad de lineas del CSV
u_int64_t size_lines = CalcLines(PATH);
hipMemcpyToSymbol(CANT_ITEMS_CUDA, &size_lines, sizeof(u_int64_t));
check_CUDA_Error("ERROR en hipMemcpyToSymbol");
// double maxDouble = DBL_MAX;
// hipMemcpyToSymbol(MAX_DOUBLE, &maxDouble, sizeof(double));
// check_CUDA_Error("ERROR en hipMemcpyToSymbol");
double **items = ReadData(PATH, size_lines, CANT_FEATURES);
//Marca de final CalcLines y ReadData
hipEventRecord(stop,0);
//Sincronizacion GPU-CPU
hipEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime2;
hipEventElapsedTime(&elapsedTime2,start,stop);
//Marca de inicio SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias
hipEventRecord(start,0);
double *cMin, *cMax;
cMin = (double*) malloc(CANT_FEATURES * sizeof(double));
cMax = (double*) malloc(CANT_FEATURES * sizeof(double));
//Encuentra el minimo y maximo de cada columna (o feature)
searchMinMax(items, size_lines, cMin, cMax, CANT_FEATURES);
printf("MIN: %lf, MAX: %lf\n", cMin[0], cMax[0]);
// calculamos el numero de bloques necesario para un tamaño de bloque fijo
int nBloques = size_lines/HILOS;
if (size_lines%HILOS != 0)
{
nBloques = nBloques + 1;
}
int hilosB = HILOS;
//Inicializa las means (medias) con valores estimativos
double** means = InitializeMeans(CANT_MEANS, cMin, cMax, CANT_FEATURES);
//Marca de final SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias
hipEventRecord(stop,0);
//Sincronizacion GPU-CPU
hipEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime3;
hipEventElapsedTime(&elapsedTime3,start,stop);
//Almacena los indices de los items
int *clusterAsignado_dev = 0;
hipMalloc(&clusterAsignado_dev,size_lines*sizeof(int));
hipMemset(clusterAsignado_dev,0,size_lines*sizeof(int));
double* items_dev;
hipMalloc( (void**)&items_dev, size_lines*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en hipMalloc");
hipMemcpy( items_dev, &items[0][0], size_lines*CANT_FEATURES*sizeof(double), hipMemcpyHostToDevice );
check_CUDA_Error("ERROR en hipMemcpy items_dev");
//Marca de inicio CalculateMeans
hipEventRecord(start,0);
//Funcion que calcula las medias nuevas
means = CalculateMeans(items_dev, means, size_lines, clusterAsignado_dev ,nBloques, hilosB);
//Marca de final CalculateMeans
hipEventRecord(stop,0);
//Sincronizacion GPU-CPU
hipEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime;
hipEventElapsedTime(&elapsedTime,start,stop);
//Marca de inicio FindCluster
hipEventRecord(start,0);
//Funcion que calcula las medias nuevas
double ***clusters = FindClusters(clusterAsignado_dev, size_lines, items);
//Marca de final CalculateMeans
hipEventRecord(stop,0);
//Sincronizacion GPU-CPU
hipEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime4;
hipEventElapsedTime(&elapsedTime4,start,stop);
//Liberacion de recursos
for(int n = 0; n < CANT_MEANS; n++){
for(u_int64_t m = 0; m < size_lines; m++){
free(clusters[n][m]);
}
free(clusters[n]);
}
free(clusters);
free(items[0]);
free(items);
free(means[0]);
free(means);
free(cMin);
free(cMax);
hipFree(clusterAsignado_dev);
hipEventDestroy(start);
hipEventDestroy(stop);
//Impresion de resultados
printf("> Tiempo de ejecucion de CalcLines y ReadData: %f ms\n",elapsedTime2);
printf("> Tiempo de ejecucion de SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias: %f ms\n",elapsedTime3);
printf("> Tiempo de ejecucion de CalculateMeans: %f ms\n",elapsedTime);
printf("> Tiempo de ejecucion de FindCluster: %f ms\n",elapsedTime4);
printf("> Tiempo de total del programa: %f ms\n", elapsedTime + elapsedTime2 + elapsedTime3 + elapsedTime4);
return EXIT_SUCCESS;
}
/**
* @brief Builds a 3D matrix into which the items are inserted according to their classification
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param cant_items Number of items
* @param items Items to classify
* @return Final 3D array of clusters, grouping the items assigned to each mean
*/
double*** FindClusters(int *clusterAsignado_dev, u_int64_t cant_items, double **items)
{
// clusters es un array de 3 dimensiones, es un conjunto de clusters.
// cada cluster es un conjunto de items.
// cada item es un conjunto de features.
double ***clusters = (double ***) malloc(CANT_MEANS * sizeof(double**));
//Inicializa clusters
for(u_int8_t n = 0; n < CANT_MEANS; n++){
clusters[n] = (double **) malloc(cant_items * sizeof(double*));
for(u_int64_t m = 0; m < cant_items; m++){
clusters[n][m] = (double *) malloc(CANT_FEATURES * sizeof(double));
}
}
int *clusterAsignado = (int*)malloc(cant_items*sizeof(int));
hipMemcpy(clusterAsignado, clusterAsignado_dev, cant_items*sizeof(int), hipMemcpyDeviceToHost );
int indices_series[CANT_MEANS];
memset(indices_series, 0, sizeof(int)*CANT_MEANS);
for(u_int64_t i = 0; i < cant_items; i++){
for(u_int8_t j = 0; j < CANT_FEATURES; j++){ //se cargan todas las features del item al cluster
clusters[clusterAsignado[i]][indices_series[clusterAsignado[i]]][j] = items[i][j];
}
indices_series[clusterAsignado[i]]++;
}
return clusters;
}
/**
* @brief Classifies the items into their corresponding means
* @param items_dev Items to classify, one value per feature, stored as a 1D array
* @param means Host-side matrix of means (number of means * number of features)
* @param size_lines Number of items
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param nBloques Number of CUDA blocks
* @param hilosB Number of CUDA threads per block
* @return Final 2D array of means according to the classification of the items
*/
double** CalculateMeans(double* items_dev, double** means, u_int64_t size_lines, int *clusterAsignado_dev, int nBloques, int hilosB)
{
double minPorcentaje;
//define el porcentaje minimo de cambio de items entre clusters para que continue la ejecucion del algoritmo
minPorcentaje = 0.001 * (double) size_lines;
printf("Porentaje minimo = %.2lf\n", minPorcentaje);
double* means_dev;
hipMalloc( (void**)&means_dev, CANT_MEANS*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en hipMalloc");
/*Arreglo de cluster sizes*/
//Creo y reseteo a 0 la variable de host
int *h_clust_sizes = (int*)malloc(CANT_MEANS*sizeof(int));
memset(h_clust_sizes, 0, sizeof(int)*CANT_MEANS);
//hipMemset(countChangeItem_dev, 0, sizeof(int));
//Creo la variable de device
int *d_clust_sizes = 0;
hipMalloc(&d_clust_sizes,CANT_MEANS*sizeof(int));
check_CUDA_Error("ERROR en hipMalloc d_clust_sizes ");
//Copio lo que hay en host a device
hipMemcpy(d_clust_sizes,h_clust_sizes,CANT_MEANS*sizeof(int),hipMemcpyHostToDevice);
check_CUDA_Error("ERROR en hipMemcpy d_clust_sizes ");
//Almacena contador de cambios de items
int *countChangeItem_dev = 0;
hipMalloc(&countChangeItem_dev,sizeof(int));
int *countChangeItem = (int*)malloc(sizeof(int));
//Calcula las medias
for(int j = 0; j < CANT_ITERACIONES; j++) {
printf("Iteracion: %d\n", j);
//En cada iteracion, cantidad de cambios es 0
//memset(countChangeItem, 0, sizeof(int));
//Paso lo que hay en means a la placa luego de cambiarlo
hipMemcpy( means_dev, &means[0][0], CANT_MEANS*CANT_FEATURES*sizeof(double), hipMemcpyHostToDevice );
check_CUDA_Error("ERROR en hipMemcpy means_dev");
//Reseteo la cantidad de elementos de cada media en cada iteracion
hipMemset(d_clust_sizes,0,CANT_MEANS*sizeof(int));
check_CUDA_Error("ERROR en hipMemset means_dev");
hipLaunchKernelGGL(( kMeansClusterAssignment), dim3(nBloques),dim3(hilosB), 0, 0, items_dev, means_dev, clusterAsignado_dev, countChangeItem_dev);
//Copio las nuevas medias obtenidas en la placa a las medias de Host
hipMemcpy(countChangeItem,countChangeItem_dev,sizeof(int),hipMemcpyDeviceToHost);
//Reseteo means para la placa, ya que se va a cambiar
hipMemset(means_dev,0,CANT_MEANS*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en hipMemset means_dev");
hipLaunchKernelGGL(( kMeansCentroidUpdate), dim3(nBloques),dim3(hilosB), 0, 0, items_dev,clusterAsignado_dev,means_dev,d_clust_sizes);
//Copio las nuevas medias obtenidas en la placa a las medias de Host
hipMemcpy(&means[0][0],means_dev,CANT_MEANS*CANT_FEATURES*sizeof(double),hipMemcpyDeviceToHost);
check_CUDA_Error("ERROR en hipMemcpy means_dev 3");
//Copio la cantidad de items de cada medias obtenidas en la placa al arreglo del host
hipMemcpy(h_clust_sizes, d_clust_sizes, CANT_MEANS*sizeof(int), hipMemcpyDeviceToHost );
check_CUDA_Error("ERROR en hipMemcpy h_clust_sizes ");
for (int a = 0; a < CANT_MEANS; a++)
{
for(int b=0; b < CANT_FEATURES; b++)
{
//Asigno el nuevo valor de las medias sacando promedio
means[a][b] = means[a][b] / h_clust_sizes[a];
}
printf("Mean[%d] -> (%lf,%lf,%lf)\n", a, means[a][0], means[a][1], means[a][2]);
printf("Cluster[%d] -> %d\n", a, h_clust_sizes[a]);
}
//Comparo la cantidad de items cambiado en la iteracion actual con la anterior y si es menor al porcentaje
//se deja de iterar
printf("Cant cambios: %d\n",*countChangeItem);
if(*countChangeItem < minPorcentaje){break;}
//Reseteo cantidad de camios para la placa, ya que se va a cambiar
hipMemset(countChangeItem_dev,0,sizeof(int));
}
hipFree(items_dev);
hipFree(means_dev);
hipFree(d_clust_sizes);
free(h_clust_sizes);
hipFree(countChangeItem_dev);
free(countChangeItem);
return means;
}
/**
* @brief Accumulates, per mean, the sum of the assigned items and how many items each mean received
* @param items_dev Items to classify, one value per feature, stored as a 1D array
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param means_dev Matrix of means (number of means * number of features), stored as a 1D array
* @param d_clust_sizes 1D array with the number of items assigned to each mean
*/
__global__ void kMeansCentroidUpdate(double *items_dev, int *clusterAsignado_dev, double *means_dev, int *d_clust_sizes)
{
//Obtengo el ID de cada hilo
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Elimino aquellos que no deban trabajar
if (idx >= CANT_ITEMS_CUDA) return;
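//Note: threads that return here never reach the __syncthreads() calls below. That is only well-defined
//when every thread of a block takes the same branch (item count a multiple of the block size); for the
//last, partially filled block the barrier becomes divergent, which CUDA/HIP do not guarantee to handle.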
//Obtengo el ID de los hilos a nivel de bloque
const int s_idx = threadIdx.x;
//Armo un arreglo de items para cada bloque en memoria compartida
__shared__ double items_bloque[HILOS][CANT_FEATURES];
for(int i = 0; i < CANT_FEATURES; i++){
items_bloque[s_idx][i] = items_dev[idx*CANT_FEATURES + i];
}
//Armo un arreglo de los cluster asignados para cada bloque en memoria compartida
__shared__ int clusterAsignado_bloque[HILOS];
clusterAsignado_bloque[s_idx] = clusterAsignado_dev[idx];
__syncthreads();
//Si es el hilo 0 de cada bloque, entonces suma los valores dentro de los arreglo compartido
if(s_idx==0)
{
int limite = ((idx + blockDim.x) < CANT_ITEMS_CUDA)? blockDim.x : (CANT_ITEMS_CUDA - idx);
//Creo arreglos de suma de valores del cluster del bloque y la cantidad de items de cada media
double clust_sums[CANT_MEANS][CANT_FEATURES]={{0},{0},{0},{0}};
int clust_sizes[CANT_MEANS]={0};
//Se recorre el bloque, incrementando el cluster sizes de acuerdo a la media asignada y lo sumo
for(int j=0; j < limite; ++j)
{
int clust_id = clusterAsignado_bloque[j];
clust_sizes[clust_id]+=1;
for(int k = 0; k < CANT_FEATURES; ++k)
{
clust_sums[clust_id][k]+=items_bloque[j][k];
}
}
//Por ultimo agregamos de forma atomica al arreglo means_dev la suma de todos los items designados en cada cluster
//y al arreglo d_clust_sizes la cantidad de items en cada media
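//Note: atomicAdd on double requires a GPU with compute capability 6.0 or newer when targeting NVIDIA hardware.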
int indice;
for(int z=0; z < CANT_MEANS; ++z)
{
indice = z*CANT_FEATURES;
for(int s=0; s < CANT_FEATURES ; s++)
{
atomicAdd(&means_dev[indice+s],clust_sums[z][s]);
}
atomicAdd(&d_clust_sizes[z],clust_sizes[z]);
}
}
__syncthreads();
}
/**
* @brief Assigns a cluster index to each item
* @param items_dev Items to classify, one value per feature, stored as a 1D array
* @param means_dev Matrix of means (number of means * number of features), stored as a 1D array
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param countChangeItem_dev Counter of items that changed cluster in this iteration
*/
__global__ void kMeansClusterAssignment(double *items_dev, double *means_dev, int *clusterAsignado_dev,int *countChangeItem_dev )
{
//Obtengo el ID para cada hilo
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Descarto aquellos hilos que no deban trabajar
if (idx >= CANT_ITEMS_CUDA) return;
//Obtengo el item correspondiente a cada hilo
double *item = &items_dev[idx*CANT_FEATURES];
u_int64_t index = Classify(means_dev, item, CANT_MEANS, CANT_FEATURES);
if(clusterAsignado_dev[idx] != (int)index)
{
atomicAdd(countChangeItem_dev,1);
}
//Asigno cada item en un cluster y almaceno el indice de clasificacion en un arreglo
clusterAsignado_dev[idx]=(int)index;
}
/**
* @brief Finds the index of the cluster to which an item belongs
* @param means_dev Matrix of means (number of means * number of features), stored as a 1D array
* @param item Item to classify
* @param cant_means Number of means
* @param cant_features Number of features
* @return Index of the cluster the item belongs to
*/
__device__ u_int64_t Classify(double* means_dev, double* item, int cant_means, int cant_features){
double minimun = MAX_DOUBLE;
int index = -1;
double distance;
for(int i = 0; i < cant_means; i++){
//calcula la distancia de un item a la media
//printf("Means_dev: %ld\n", means_dev[i*3]);
distance = distanciaEuclidiana(item, &means_dev[i*cant_features], cant_features);
if(distance < minimun){
minimun = distance;
index = i;
}
}
return (u_int64_t) index;
}
/**
* @brief Computes the Euclidean distance between an item and a mean (two vectors)
* @param x Item (vector 1)
* @param y Mean (vector 2)
* @param length Length of the vectors (number of features)
* @return Euclidean distance between the two vectors
*/
__device__ double distanciaEuclidiana(double* x , double* y, int length){
double distancia = 0;
for(int i = 0; i < length; i++){
distancia += pow((x[i] - y[i]), 2);
}
return sqrt(distancia);
}
/**
* @brief Counts the number of items to classify
* @param filename Name of the input file
* @return Number of lines (i.e. items) in the file
*/
u_int64_t CalcLines(char filename[50]) {
printf("%s\n", filename);
FILE *f = fopen(filename, "r");
u_int64_t cant_lines = 0;
char* cadena = (char*) calloc(100, sizeof(char));
char* valor;
while(fgets(cadena, 100, f)){
valor = strstr(cadena, ",");
valor++;
if(valor != NULL && strcmp(valor,"values\n") && strcmp(valor,"\n")){
cant_lines++;
}
}
free (cadena);
fclose(f);
printf("Cantidad de items: %ld\n", cant_lines);
return cant_lines;
}
/**
* @brief Allocates a contiguous 2D matrix of doubles
* @param rows Number of rows of the matrix
* @param cols Number of columns of the matrix
* @return 2D matrix
*/
double **alloc_2d_double(u_int64_t rows, u_int64_t cols) {
double *data = (double *)malloc(rows * cols * sizeof(double));
double **array= (double **)malloc(rows * sizeof(double*));
for (u_int64_t i = 0; i < rows; i++)
array[i] = &(data[cols*i]);
return array;
}
/**
* @brief Finds the minimum and maximum value of each feature in the items array.
* @param items Data to classify
* @param size_lines Number of items
* @param minimo Array with the minimum value of each feature
* @param maximo Array with the maximum value of each feature
* @param cant_features Number of features of each item
*/
void searchMinMax(double** items, u_int64_t size_lines, double* minimo, double* maximo, u_int8_t cant_features){
//Define el maximo como el minimo valor de tipo DOUBLE y el minimo como el maximo valor de tipo DOUBLE
for(int n = 0; n < cant_features; n++){
maximo[n] = DBL_MIN;
minimo[n] = DBL_MAX;
}
for(u_int64_t i = 0; i < size_lines; i++){ //recorremos cada item
for(u_int8_t j = 0; j < cant_features; j++){ //recorremos cada feature
if(items[i][j] < minimo[j]){
minimo[j] = items[i][j];
}
if(items[i][j] > maximo[j]){
maximo[j] = items[i][j];
}
}
}
printf("maximos: %lf, %lf, %lf\n", maximo[0], maximo[1], maximo[2]);
printf("minimos: %lf, %lf, %lf\n", minimo[0], minimo[1], minimo[2]);
}
/**
* @brief Reads the given file and fills the items array.
* @param filename Name of the file that contains the data
* @param size_lines Number of lines in the file
* @param cant_features Number of features of each item (number of comma-separated columns in the file)
* @return 2D array with as many rows as items and as many columns as features.
*/
double** ReadData(char filename[50], u_int64_t size_lines, u_int8_t cant_features){
FILE *file = fopen(filename, "r");
rewind(file);
//Definimos un arreglo de arreglos (cada item consta de 2 o mas features)
double** items = (double **) alloc_2d_double(size_lines, cant_features);
char* line = (char*)calloc(100, sizeof(char));
double feature;
u_int64_t i = 0, j = 0;
char* ptr;
while(fgets(line, 100, file)){
j = 0;
char *item = strstr(line, ","); //se ignora el primer elemento del archivo (indice)
item++;
if(item != NULL && strcmp(item, "values\n") && strcmp(item, "\n")){ //Para recortar la cadena y tomar solo el segundo dato
// item[strlen(item)-1] = '\0';
char *token = strtok(item, ","); //separa los elementos de la linea por comas
while(token != NULL){
feature = strtod(token, &ptr); //Pasaje a double
items[i][j] = feature; //Almacenamiento en item
j++;
token = strtok(NULL, ","); //busco el siguiente token
}
i++;
}
}
free(line);
fclose(file);
return items;
}
/**
* @brief Checks for and reports the last CUDA error
* @param mensaje Message printed alongside the CUDA error
*/
__host__ void check_CUDA_Error(const char *mensaje)
{
hipError_t error;
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess)
{
printf("ERROR %d: %s (%s)\n", error, hipGetErrorString(error), mensaje);
printf("\npulsa INTRO para finalizar...");
// fflush(stdin) is undefined behaviour in standard C; just wait for a key press before exiting
getchar();
exit(-1);
}
}
/**
* @brief Initializes the array of means with equally spaced values within the data range.
* @param cant_means Number of means (clusters)
* @param cMin Vector with the minimum value of each feature
* @param cMax Vector with the maximum value of each feature
* @param cant_features Number of features (columns) of each item
* @return Array with the means (one per cluster).
* Example: range: 20 (0 to 19)
* cantMeans -> 4
* jump: 20 / 4 = 5
* means[0] = 0 + 0.5 * 5 = 2.5
* means[1] = 0 + 1.5 * 5 = 7.5
* means[2] = 0 + 2.5 * 5 = 12.5
* means[3] = 0 + 3.5 * 5 = 17.5
*/
double** InitializeMeans(u_int16_t cant_means, double* cMin, double* cMax, u_int8_t cant_features){
/* |__Feature 0__|__Feature 1__|__Feature 2__|
Media0|_____________|_____________|_____________|
Media1|_____________|_____________|_____________|
*/
double **means = (double **) alloc_2d_double(cant_means, cant_features);
//definimos el salto de un valor de media al siguiente
double *jump = (double *) malloc(cant_features * sizeof(double));
for(u_int8_t n = 0; n < cant_features; n++){
jump[n] = (double) (cMax[n] - cMin[n]) / cant_means;
}
printf("\nValores de las medias iniciales:\n");
for(u_int16_t i = 0; i < cant_means; i++){
for(u_int8_t j = 0; j < cant_features; j++){
means[i][j] = cMin[j] + (0.5 + i) * jump[j];
}
printf("Mean[%d] -> (%lf,%lf,%lf)\n", i, means[i][0], means[i][1], means[i][2]);
}
free(jump);
return means;
}
|
00c2bbdd81a91e97234bb8980b74c1ec37e2b0ae.cu
|
#include "stdio.h"
#include "float.h"
#include <cuda.h>
#define HILOS 128
#define PATH "./inputs/randomData_2M_3feature.csv"
#define CANT_FEATURES 3
#define CANT_MEANS 4
#define CANT_ITERACIONES 100
#define MAX_DOUBLE DBL_MAX
//Funciones CUDA
__global__ void kMeansClusterAssignment(double* items_dev, double* means_dev, int *clusterAsignado_dev,int *countChangeItem_dev );
__global__ void kMeansCentroidUpdate(double *items_dev, int *clusterAsignado_dev, double *means_dev, int *d_clust_sizes);
__device__ u_int64_t Classify(double* means_dev, double* item, int cant_means, int cant_features);
__device__ double distanciaEuclidiana(double* x , double* y, int length);
//Funciones HOST
double** CalculateMeans(double* items_dev, double** means, u_int64_t size_lines, int *clusterAsignado_dev, int nBloques, int hilosB);
double*** FindClusters(int *clusterAsignado_dev, u_int64_t cant_items, double **items);
u_int64_t CalcLines(char filename[50]);
double **alloc_2d_double(u_int64_t rows, u_int64_t cols);
double** ReadData(char filename[50], u_int64_t size_lines, u_int8_t cant_features);
void searchMinMax(double** items, u_int64_t size_lines, double* minimo, double* maximo, u_int8_t cant_features);
double** InitializeMeans(u_int16_t cant_means, double* cMin, double* cMax, u_int8_t cant_features);
__host__ void check_CUDA_Error(const char *mensaje);
//Constantes de CUDA
__constant__ u_int64_t CANT_ITEMS_CUDA;
int main()
{
//Declaracion de eventos para tomar tiempos
cudaEvent_t start;
cudaEvent_t stop;
//Creacion de eventos
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Marca de inicio CalcLines y ReadData
cudaEventRecord(start,0);
//Calcula la cantidad de lineas del CSV
u_int64_t size_lines = CalcLines(PATH);
cudaMemcpyToSymbol(CANT_ITEMS_CUDA, &size_lines, sizeof(u_int64_t));
check_CUDA_Error("ERROR en cudaMemcpyToSymbol");
// double maxDouble = DBL_MAX;
// cudaMemcpyToSymbol(MAX_DOUBLE, &maxDouble, sizeof(double));
// check_CUDA_Error("ERROR en cudaMemcpyToSymbol");
double **items = ReadData(PATH, size_lines, CANT_FEATURES);
//Marca de final CalcLines y ReadData
cudaEventRecord(stop,0);
//Sincronizacion GPU-CPU
cudaEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime2;
cudaEventElapsedTime(&elapsedTime2,start,stop);
//Marca de inicio SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias
cudaEventRecord(start,0);
double *cMin, *cMax;
cMin = (double*) malloc(CANT_FEATURES * sizeof(double));
cMax = (double*) malloc(CANT_FEATURES * sizeof(double));
//Encuentra el minimo y maximo de cada columna (o feature)
searchMinMax(items, size_lines, cMin, cMax, CANT_FEATURES);
printf("MIN: %lf, MAX: %lf\n", cMin[0], cMax[0]);
// calculamos el numero de bloques necesario para un tamaño de bloque fijo
int nBloques = size_lines/HILOS;
if (size_lines%HILOS != 0)
{
nBloques = nBloques + 1;
}
int hilosB = HILOS;
//Inicializa las means (medias) con valores estimativos
double** means = InitializeMeans(CANT_MEANS, cMin, cMax, CANT_FEATURES);
//Marca de final SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias
cudaEventRecord(stop,0);
//Sincronizacion GPU-CPU
cudaEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime3;
cudaEventElapsedTime(&elapsedTime3,start,stop);
//Almacena los indices de los items
int *clusterAsignado_dev = 0;
cudaMalloc(&clusterAsignado_dev,size_lines*sizeof(int));
cudaMemset(clusterAsignado_dev,0,size_lines*sizeof(int));
double* items_dev;
cudaMalloc( (void**)&items_dev, size_lines*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en cudaMalloc");
cudaMemcpy( items_dev, &items[0][0], size_lines*CANT_FEATURES*sizeof(double), cudaMemcpyHostToDevice );
check_CUDA_Error("ERROR en cudaMemcpy items_dev");
//Marca de inicio CalculateMeans
cudaEventRecord(start,0);
//Funcion que calcula las medias nuevas
means = CalculateMeans(items_dev, means, size_lines, clusterAsignado_dev ,nBloques, hilosB);
//Marca de final CalculateMeans
cudaEventRecord(stop,0);
//Sincronizacion GPU-CPU
cudaEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
//Marca de inicio FindCluster
cudaEventRecord(start,0);
//Funcion que calcula las medias nuevas
double ***clusters = FindClusters(clusterAsignado_dev, size_lines, items);
//Marca de final CalculateMeans
cudaEventRecord(stop,0);
//Sincronizacion GPU-CPU
cudaEventSynchronize(stop);
//Calculo del tiempo en milisegundos
float elapsedTime4;
cudaEventElapsedTime(&elapsedTime4,start,stop);
//Liberacion de recursos
for(int n = 0; n < CANT_MEANS; n++){
for(u_int64_t m = 0; m < size_lines; m++){
free(clusters[n][m]);
}
free(clusters[n]);
}
free(clusters);
free(items[0]);
free(items);
free(means[0]);
free(means);
free(cMin);
free(cMax);
cudaFree(clusterAsignado_dev);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//Impresion de resultados
printf("> Tiempo de ejecucion de CalcLines y ReadData: %f ms\n",elapsedTime2);
printf("> Tiempo de ejecucion de SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias: %f ms\n",elapsedTime3);
printf("> Tiempo de ejecucion de CalculateMeans: %f ms\n",elapsedTime);
printf("> Tiempo de ejecucion de FindCluster: %f ms\n",elapsedTime4);
printf("> Tiempo de total del programa: %f ms\n", elapsedTime + elapsedTime2 + elapsedTime3 + elapsedTime4);
return EXIT_SUCCESS;
}
/**
* @brief Builds a 3D matrix into which the items are inserted according to their classification
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param cant_items Number of items
* @param items Items to classify
* @return Final 3D array of clusters, grouping the items assigned to each mean
*/
double*** FindClusters(int *clusterAsignado_dev, u_int64_t cant_items, double **items)
{
// clusters es un array de 3 dimensiones, es un conjunto de clusters.
// cada cluster es un conjunto de items.
// cada item es un conjunto de features.
double ***clusters = (double ***) malloc(CANT_MEANS * sizeof(double**));
//Inicializa clusters
for(u_int8_t n = 0; n < CANT_MEANS; n++){
clusters[n] = (double **) malloc(cant_items * sizeof(double*));
for(u_int64_t m = 0; m < cant_items; m++){
clusters[n][m] = (double *) malloc(CANT_FEATURES * sizeof(double));
}
}
int *clusterAsignado = (int*)malloc(cant_items*sizeof(int));
cudaMemcpy(clusterAsignado, clusterAsignado_dev, cant_items*sizeof(int), cudaMemcpyDeviceToHost );
int indices_series[CANT_MEANS];
memset(indices_series, 0, sizeof(int)*CANT_MEANS);
for(u_int64_t i = 0; i < cant_items; i++){
for(u_int8_t j = 0; j < CANT_FEATURES; j++){ //se cargan todas las features del item al cluster
clusters[clusterAsignado[i]][indices_series[clusterAsignado[i]]][j] = items[i][j];
}
indices_series[clusterAsignado[i]]++;
}
return clusters;
}
/**
* @brief Classifies the items into their corresponding means
* @param items_dev Items to classify, one value per feature, stored as a 1D array
* @param means Host-side matrix of means (number of means * number of features)
* @param size_lines Number of items
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param nBloques Number of CUDA blocks
* @param hilosB Number of CUDA threads per block
* @return Final 2D array of means according to the classification of the items
*/
double** CalculateMeans(double* items_dev, double** means, u_int64_t size_lines, int *clusterAsignado_dev, int nBloques, int hilosB)
{
double minPorcentaje;
//define el porcentaje minimo de cambio de items entre clusters para que continue la ejecucion del algoritmo
minPorcentaje = 0.001 * (double) size_lines;
printf("Porentaje minimo = %.2lf\n", minPorcentaje);
double* means_dev;
cudaMalloc( (void**)&means_dev, CANT_MEANS*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en cudaMalloc");
/*Arreglo de cluster sizes*/
//Creo y reseteo a 0 la variable de host
int *h_clust_sizes = (int*)malloc(CANT_MEANS*sizeof(int));
memset(h_clust_sizes, 0, sizeof(int)*CANT_MEANS);
//cudaMemset(countChangeItem_dev, 0, sizeof(int));
//Creo la variable de device
int *d_clust_sizes = 0;
cudaMalloc(&d_clust_sizes,CANT_MEANS*sizeof(int));
check_CUDA_Error("ERROR en cudaMalloc d_clust_sizes ");
//Copio lo que hay en host a device
cudaMemcpy(d_clust_sizes,h_clust_sizes,CANT_MEANS*sizeof(int),cudaMemcpyHostToDevice);
check_CUDA_Error("ERROR en cudaMemcpy d_clust_sizes ");
//Almacena contador de cambios de items
int *countChangeItem_dev = 0;
cudaMalloc(&countChangeItem_dev,sizeof(int));
int *countChangeItem = (int*)malloc(sizeof(int));
//Calcula las medias
for(int j = 0; j < CANT_ITERACIONES; j++) {
printf("Iteracion: %d\n", j);
//En cada iteracion, cantidad de cambios es 0
//memset(countChangeItem, 0, sizeof(int));
//Paso lo que hay en means a la placa luego de cambiarlo
cudaMemcpy( means_dev, &means[0][0], CANT_MEANS*CANT_FEATURES*sizeof(double), cudaMemcpyHostToDevice );
check_CUDA_Error("ERROR en cudaMemcpy means_dev");
//Reseteo la cantidad de elementos de cada media en cada iteracion
cudaMemset(d_clust_sizes,0,CANT_MEANS*sizeof(int));
check_CUDA_Error("ERROR en cudaMemset means_dev");
kMeansClusterAssignment<<<nBloques,hilosB>>>(items_dev, means_dev, clusterAsignado_dev, countChangeItem_dev);
//Copio las nuevas medias obtenidas en la placa a las medias de Host
cudaMemcpy(countChangeItem,countChangeItem_dev,sizeof(int),cudaMemcpyDeviceToHost);
//Reseteo means para la placa, ya que se va a cambiar
cudaMemset(means_dev,0,CANT_MEANS*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en cudaMemset means_dev");
kMeansCentroidUpdate<<<nBloques,hilosB>>>(items_dev,clusterAsignado_dev,means_dev,d_clust_sizes);
//Copio las nuevas medias obtenidas en la placa a las medias de Host
cudaMemcpy(&means[0][0],means_dev,CANT_MEANS*CANT_FEATURES*sizeof(double),cudaMemcpyDeviceToHost);
check_CUDA_Error("ERROR en cudaMemcpy means_dev 3");
//Copio la cantidad de items de cada medias obtenidas en la placa al arreglo del host
cudaMemcpy(h_clust_sizes, d_clust_sizes, CANT_MEANS*sizeof(int), cudaMemcpyDeviceToHost );
check_CUDA_Error("ERROR en cudaMemcpy h_clust_sizes ");
for (int a = 0; a < CANT_MEANS; a++)
{
for(int b=0; b < CANT_FEATURES; b++)
{
//Asigno el nuevo valor de las medias sacando promedio
means[a][b] = means[a][b] / h_clust_sizes[a];
}
printf("Mean[%d] -> (%lf,%lf,%lf)\n", a, means[a][0], means[a][1], means[a][2]);
printf("Cluster[%d] -> %d\n", a, h_clust_sizes[a]);
}
//Comparo la cantidad de items cambiado en la iteracion actual con la anterior y si es menor al porcentaje
//se deja de iterar
printf("Cant cambios: %d\n",*countChangeItem);
if(*countChangeItem < minPorcentaje){break;}
//Reseteo cantidad de camios para la placa, ya que se va a cambiar
cudaMemset(countChangeItem_dev,0,sizeof(int));
}
cudaFree(items_dev);
cudaFree(means_dev);
cudaFree(d_clust_sizes);
free(h_clust_sizes);
cudaFree(countChangeItem_dev);
free(countChangeItem);
return means;
}
/**
* @brief Accumulates, per mean, the sum of the assigned items and how many items each mean received
* @param items_dev Items to classify, one value per feature, stored as a 1D array
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param means_dev Matrix of means (number of means * number of features), stored as a 1D array
* @param d_clust_sizes 1D array with the number of items assigned to each mean
*/
__global__ void kMeansCentroidUpdate(double *items_dev, int *clusterAsignado_dev, double *means_dev, int *d_clust_sizes)
{
//Get the global ID of each thread
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Discard the threads that have no work to do
if (idx >= CANT_ITEMS_CUDA) return;
//Get the thread ID within the block
const int s_idx = threadIdx.x;
//Build a per-block copy of the items in shared memory
__shared__ double items_bloque[HILOS][CANT_FEATURES];
for(int i = 0; i < CANT_FEATURES; i++){
items_bloque[s_idx][i] = items_dev[idx*CANT_FEATURES + i];
}
//Build a per-block copy of the assigned clusters in shared memory
__shared__ int clusterAsignado_bloque[HILOS];
clusterAsignado_bloque[s_idx] = clusterAsignado_dev[idx];
__syncthreads();
//Thread 0 of each block accumulates the values held in the shared arrays
if(s_idx==0)
{
int limite = ((idx + blockDim.x) < CANT_ITEMS_CUDA)? blockDim.x : (CANT_ITEMS_CUDA - idx);
//Per-block accumulators: per-cluster feature sums and per-mean item counts
double clust_sums[CANT_MEANS][CANT_FEATURES]={{0}};
int clust_sizes[CANT_MEANS]={0};
//Walk the block, incrementing the size of the assigned cluster and accumulating the item values
for(int j=0; j < limite; ++j)
{
int clust_id = clusterAsignado_bloque[j];
clust_sizes[clust_id]+=1;
for(int k = 0; k < CANT_FEATURES; ++k)
{
clust_sums[clust_id][k]+=items_bloque[j][k];
}
}
//Finally, atomically add the per-block sums of the items assigned to each cluster into means_dev
//and the per-mean item counts into d_clust_sizes
int indice;
for(int z=0; z < CANT_MEANS; ++z)
{
indice = z*CANT_FEATURES;
for(int s=0; s < CANT_FEATURES ; s++)
{
atomicAdd(&means_dev[indice+s],clust_sums[z][s]);
}
atomicAdd(&d_clust_sizes[z],clust_sizes[z]);
}
}
__syncthreads();
}
/**
* @brief Function in charge of assigning a cluster index to every item
* @param items_dev Items to classify, each item has one value per feature, stored as a 1D array
* @param means_dev Matrix of means (number of features * number of means), stored as a 1D array
* @param clusterAsignado_dev 1D array with the cluster assigned to each item
* @param countChangeItem_dev Number of items that changed cluster
*/
__global__ void kMeansClusterAssignment(double *items_dev, double *means_dev, int *clusterAsignado_dev,int *countChangeItem_dev )
{
//Get the global ID of each thread
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
//Discard the threads that have no work to do
if (idx >= CANT_ITEMS_CUDA) return;
//Get the item that corresponds to this thread
double *item = &items_dev[idx*CANT_FEATURES];
u_int64_t index = Classify(means_dev, item, CANT_MEANS, CANT_FEATURES);
if(clusterAsignado_dev[idx] != (int)index)
{
atomicAdd(countChangeItem_dev,1);
}
//Assign the item to a cluster and store the classification index in the array
clusterAsignado_dev[idx]=(int)index;
}
/**
* @brief Function that obtains the index of the cluster the item belongs to
* @param means_dev Matrix of means (number of features * number of means), stored as a 1D array
* @param item Item to classify
* @param cant_means Number of means
* @param cant_features Number of features
* @return Index of the cluster the item belongs to
*/
__device__ u_int64_t Classify(double* means_dev, double* item, int cant_means, int cant_features){
double minimun = MAX_DOUBLE;
int index = -1;
double distance;
for(int i = 0; i < cant_means; i++){
//compute the distance from the item to this mean
//printf("Means_dev: %ld\n", means_dev[i*3]);
distance = distanciaEuclidiana(item, &means_dev[i*cant_features], cant_features);
if(distance < minimun){
minimun = distance;
index = i;
}
}
return (u_int64_t) index;
}
/**
* @brief Function that computes the Euclidean distance between the item and the different means (2 vectors)
* @param x Item (vector 1)
* @param y Means (vector 2)
* @param length Length of the vectors (number of features)
* @return Euclidean distance between both vectors.
*/
__device__ double distanciaEuclidiana(double* x , double* y, int length){
double distancia = 0;
for(int i = 0; i < length; i++){
distancia += pow((x[i] - y[i]), 2);
}
return sqrt(distancia);
}
/**
* @brief Function that computes the number of items to classify
* @param filename name of the file
* @return number of lines (i.e. items) in the file
*/
u_int64_t CalcLines(char filename[50]) {
printf("%s\n", filename);
FILE *f = fopen(filename, "r");
u_int64_t cant_lines = 0;
char* cadena = (char*) calloc(100, sizeof(char));
char* valor;
while(fgets(cadena, 100, f)){
valor = strstr(cadena, ",");
if(valor != NULL) valor++; //only advance past the comma when one was actually found
if(valor != NULL && strcmp(valor,"values\n") && strcmp(valor,"\n")){
cant_lines++;
}
}
free (cadena);
fclose(f);
printf("Number of items: %lu\n", cant_lines);
return cant_lines;
}
/**
* @brief Function that allocates a 2D matrix
* @param rows rows of the matrix
* @param cols columns of the matrix
* @return 2D matrix
*/
double **alloc_2d_double(u_int64_t rows, u_int64_t cols) {
double *data = (double *)malloc(rows * cols * sizeof(double));
double **array= (double **)malloc(rows * sizeof(double*));
for (u_int64_t i = 0; i < rows; i++)
array[i] = &(data[cols*i]);
return array;
}
/**
* @brief Finds the minimum and maximum value of each feature across the items array.
* @param items data to classify
* @param size_lines number of items
* @param minimo array with the minimum value of each feature
* @param maximo array with the maximum value of each feature
* @param cant_features number of features each item has
*/
void searchMinMax(double** items, u_int64_t size_lines, double* minimo, double* maximo, u_int8_t cant_features){
//Start each maximum at the lowest representable double and each minimum at the highest,
//so the first comparison always updates them (DBL_MIN is the smallest positive double, not the lowest value)
for(int n = 0; n < cant_features; n++){
maximo[n] = -DBL_MAX;
minimo[n] = DBL_MAX;
}
for(u_int64_t i = 0; i < size_lines; i++){ //iterate over every item
for(u_int8_t j = 0; j < cant_features; j++){ //iterate over every feature
if(items[i][j] < minimo[j]){
minimo[j] = items[i][j];
}
if(items[i][j] > maximo[j]){
maximo[j] = items[i][j];
}
}
}
printf("maximums: %lf, %lf, %lf\n", maximo[0], maximo[1], maximo[2]);
printf("minimums: %lf, %lf, %lf\n", minimo[0], minimo[1], minimo[2]);
}
/**
* @brief Reads the given file and fills the items array.
* @param filename string with the name of the file that holds the data
* @param size_lines number of lines in the file
* @param cant_features number of features per item (number of comma-separated columns in the file)
* @return 2D array with as many rows as there are items and as many columns as there are features.
*/
double** ReadData(char filename[50], u_int64_t size_lines, u_int8_t cant_features){
FILE *file = fopen(filename, "r");
rewind(file);
//Define an array of arrays (each item consists of 2 or more features)
double** items = (double **) alloc_2d_double(size_lines, cant_features);
char* line = (char*)calloc(100, sizeof(char));
double feature;
u_int64_t i = 0, j = 0;
char* ptr;
while(fgets(line, 100, file)){
j = 0;
char *item = strstr(line, ","); //the first field of the file (the index) is skipped
if(item != NULL) item++; //only advance past the comma when one was actually found
if(item != NULL && strcmp(item, "values\n") && strcmp(item, "\n")){ //keep only the lines that hold data values
// item[strlen(item)-1] = '\0';
char *token = strtok(item, ","); //split the fields of the line by commas
while(token != NULL){
feature = strtod(token, &ptr); //convert to double
items[i][j] = feature; //store it in the item
j++;
token = strtok(NULL, ","); //move on to the next token
}
i++;
}
}
free(line);
fclose(file);
return items;
}
/**
* @brief Function that detects and reports a CUDA error
* @param mensaje CUDA error message
*/
__host__ void check_CUDA_Error(const char *mensaje)
{
cudaError_t error;
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
printf("\npress ENTER to finish...");
fflush(stdin);
char tecla = getchar();
exit(-1);
}
}
/**
* @brief Initializes the array of means with values evenly spaced across the data range.
* @param cant_means number of means or clusters
* @param cMin vector with the minimum values of each feature
* @param cMax vector with the maximum values of each feature
* @param cant_features number of features (or columns) of each item
* @return array with the means (1 per cluster).
* Example: range: 20 (0 to 19)
* cantMeans -> 4
* jump: 20 / 4 = 5
* means[0] = 0 + 0.5 * 5 = 2.5
* means[1] = 0 + 1.5 * 5 = 7.5
* means[2] = 0 + 2.5 * 5 = 12.5
* means[3] = 0 + 3.5 * 5 = 17.5
*/
double** InitializeMeans(u_int16_t cant_means, double* cMin, double* cMax, u_int8_t cant_features){
/* |__Feature 0__|__Feature 1__|__Feature 2__|
Mean0 |_____________|_____________|_____________|
Mean1 |_____________|_____________|_____________|
*/
double **means = (double **) alloc_2d_double(cant_means, cant_features);
//define the jump from one mean value to the next
double *jump = (double *) malloc(cant_features * sizeof(double));
for(u_int8_t n = 0; n < cant_features; n++){
jump[n] = (double) (cMax[n] - cMin[n]) / cant_means;
}
printf("\nInitial mean values:\n");
for(u_int16_t i = 0; i < cant_means; i++){
for(u_int8_t j = 0; j < cant_features; j++){
means[i][j] = cMin[j] + (0.5 + i) * jump[j];
}
printf("Mean[%d] -> (%lf,%lf,%lf)\n", i, means[i][0], means[i][1], means[i][2]);
}
free(jump);
return means;
}
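/*
* Illustrative sketch only (not part of the original program): with the helpers above,
* a typical host-side flow would look roughly like
*   u_int64_t n = CalcLines(filename);
*   double **items = ReadData(filename, n, CANT_FEATURES);
*   searchMinMax(items, n, cMin, cMax, CANT_FEATURES);
*   double **means = InitializeMeans(CANT_MEANS, cMin, cMax, CANT_FEATURES);
*   // ...copy items and means to the device and run the iteration loop shown earlier...
* where filename, cMin and cMax are assumed to be set up by the caller.
*/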
|
e929be44a42e9a6ba18002f90e4c0b7ce4fb6c79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
}
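/*
* The empty kernel above is left as the exercise stub. Purely as an illustrative
* sketch (not the course's official solution), one straightforward way to build it
* out is shown below; the name matrixMulGPU_sketch and the 16x16 tile size are
* assumptions made here, not part of the original file.
*/
__global__ void matrixMulGPU_sketch( int * a, int * b, int * c )
{
// Each thread computes one element of the N x N result matrix
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if ( row < N && col < N )
{
int val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
/*
* A matching 2D launch configuration (again, just one possible choice) would be:
* dim3 threads_per_block( 16, 16, 1 );
* dim3 number_of_blocks( (N + 15) / 16, (N + 15) / 16, 1 );
*/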
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
void matrixMulCPU( int * a, int * b, int * c )
{
int val = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
hipMallocManaged (&a, size);
hipMallocManaged (&b, size);
hipMallocManaged (&c_cpu, size);
hipMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
/*
* Assign `threads_per_block` and `number_of_blocks` 2D values
* that can be used in matrixMulGPU above.
*/
dim3 threads_per_block;
dim3 number_of_blocks;
hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu );
hipDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
hipFree(a); hipFree(b);
hipFree( c_cpu ); hipFree( c_gpu );
}
|
e929be44a42e9a6ba18002f90e4c0b7ce4fb6c79.cu
|
#include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
}
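/*
* The empty kernel above is left as the exercise stub. Purely as an illustrative
* sketch (not the course's official solution), one straightforward way to build it
* out is shown below; the name matrixMulGPU_sketch and the 16x16 tile size are
* assumptions made here, not part of the original file.
*/
__global__ void matrixMulGPU_sketch( int * a, int * b, int * c )
{
// Each thread computes one element of the N x N result matrix
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if ( row < N && col < N )
{
int val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
/*
* A matching 2D launch configuration (again, just one possible choice) would be:
* dim3 threads_per_block( 16, 16, 1 );
* dim3 number_of_blocks( (N + 15) / 16, (N + 15) / 16, 1 );
*/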
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
void matrixMulCPU( int * a, int * b, int * c )
{
int val = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
/*
* Assign `threads_per_block` and `number_of_blocks` 2D values
* that can be used in matrixMulGPU above.
*/
dim3 threads_per_block;
dim3 number_of_blocks;
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
cudaDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
cudaFree(a); cudaFree(b);
cudaFree( c_cpu ); cudaFree( c_gpu );
}
|
970f75ee3ef9b8d3f7754b4241dabc90df3a1d5a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
int main( void ) {
hipDeviceProp_t prop;
int count, error;
error = hipGetDeviceCount(&count);
if (error != hipSuccess) {
printf("hipGetDeviceCount error %d. ", error);
if (error == hipErrorNoDevice)
printf("hipErrorNoDevice. \n");
if (error == hipErrorInsufficientDriver)
printf("hipErrorInsufficientDriver. \n");
return(error);
}
printf("Number of devices: %d", count);
for (int i=0; i < count; i++) {
hipGetDeviceProperties(&prop,i);
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate (KHz): %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
getchar();
return(0);
}
|
970f75ee3ef9b8d3f7754b4241dabc90df3a1d5a.cu
|
#include <stdio.h>
#include <stdlib.h>
int main( void ) {
cudaDeviceProp prop;
int count, error;
error = cudaGetDeviceCount(&count);
if (error != cudaSuccess) {
printf("cudaGetDeviceCount error %d. ", error);
if (error == cudaErrorNoDevice)
printf("cudaErrorNoDevice. \n");
if (error == cudaErrorInsufficientDriver)
printf("cudaErrorInsufficientDriver. \n");
return(error);
}
printf("Number of devices: %d", count);
for (int i=0; i < count; i++) {
cudaGetDeviceProperties(&prop,i);
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate (KHz): %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
getchar();
return(0);
}
|
cad8251f06a6464ce111503e9bf7dcbf3ce9c031.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gtest/gtest.h"
#include <cstdlib>
#include <iostream>
#include <vector>
#include <chrono>
#include <thrust/device_vector.h>
#include "gtest/gtest.h"
#include <cudf.h>
#include <utilities/cudf_utils.h>
#include <cudf/functions.h>
#include "tests/utilities/cudf_test_utils.cuh"
// uncomment to enable benchmarking gdf_column_concat
//#define ENABLE_CONCAT_BENCHMARK
template <typename T>
struct print {
__device__ void operator()(T x) { printf("%x ", x); }
};
struct ColumnConcatTest : public testing::Test
{
ColumnConcatTest() {}
~ColumnConcatTest() {}
template <typename T, typename data_initializer_t, typename null_initializer_t>
void multicolumn_test(std::vector<size_t> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<T> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
// This is just an alias to a gdf_column with a custom deleter that will free
// the data and valid fields when the unique_ptr goes out of scope
using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Copies the random data from each host vector in the_columns to the device in a gdf_column.
// Each gdf_column's validity bit i will be initialized with the lambda
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
gdf_size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<T> output_data(total_size);
std::vector<gdf_valid_type> output_valid(gdf_get_num_chars_bitmask(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
// make a concatenated reference
std::vector<T> ref_data;
for (size_t i = 0; i < the_columns.size(); ++i)
std::copy(the_columns[i].begin(), the_columns[i].end(), std::back_inserter(ref_data));
gdf_size_type ref_null_count = 0;
std::vector<gdf_valid_type> ref_valid(gdf_get_num_chars_bitmask(total_size));
for (gdf_size_type index = 0, col = 0, row = 0; index < total_size; ++index)
{
if (null_init(row, col)) gdf::util::turn_bit_on(ref_valid.data(), index);
else ref_null_count++;
if (++row >= column_sizes[col]) { row = 0; col++; }
}
auto ref_gdf_col = create_gdf_column(ref_data, ref_valid);
EXPECT_EQ(ref_null_count, ref_gdf_col->null_count);
EXPECT_TRUE(gdf_equal_columns<int>(ref_gdf_col.get(), output_gdf_col.get()));
//print_valid_data(ref_valid.data(), total_size); printf("\n");
//print_valid_data(output_gdf_col->valid, total_size);
}
template <typename T, typename data_initializer_t, typename null_initializer_t>
void multicolumn_bench(std::vector<size_t> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<T> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
gdf_size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<int32_t> output_data(total_size);
std::vector<gdf_valid_type> output_valid(gdf_get_num_chars_bitmask(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
auto start = std::chrono::high_resolution_clock::now();
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
int num = 100;
for (int i = 0; i < num; ++i) {
gdf_column_concat(output_gdf_col.get(), columns_to_concat, num_columns);
}
hipDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end-start;
std::cout << "Time for " << num << " concats of " << num_columns << " columns of "
<< total_size << " total elements:\n";
std::cout << diff.count() << " s\n";
}
};
// Test various cases with null pointers or empty columns
TEST_F(ColumnConcatTest, ErrorConditions)
{
constexpr int num_columns = 4;
// Test null output column
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(0, 0, 0));
std::vector<gdf_size_type> column_sizes{4, 1, 2, 3};
gdf_size_type total_size = 0;
for (auto& n : column_sizes)
total_size += n;
gdf_column **input_columns = new gdf_column*[num_columns];
for (int i = 0; i < num_columns; ++i) {
input_columns[i] = 0;
}
std::vector<int32_t> output_data(total_size);
std::vector<gdf_valid_type> output_valid(gdf_get_num_chars_bitmask(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
// Test array of null input columns
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(output_gdf_col.get(), input_columns, num_columns));
// use storage that outlives the loop so the raw pointers kept in input_columns remain valid
std::vector<gdf_column> cols(num_columns);
for (int i = 0; i < num_columns; ++i) {
EXPECT_EQ(GDF_SUCCESS, gdf_column_view(&cols[i], 0, 0, column_sizes[i], GDF_INT32));
input_columns[i] = &cols[i];
}
// test null input column data / valid pointers
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(output_gdf_col.get(), input_columns, num_columns));
// create some actual input columns
// keep the owning smart pointers returned by create_gdf_column alive so the raw pointers stay valid
std::vector<std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>> input_owners;
for (int i = 0; i < num_columns; ++i) {
gdf_size_type size = column_sizes[i];
std::vector<int32_t> data(size);
std::vector<gdf_valid_type> valid(gdf_get_num_chars_bitmask(size));
input_owners.push_back(create_gdf_column(data, valid));
input_columns[i] = input_owners.back().get();
}
// test mismatched sizes
output_gdf_col->size = total_size - 1;
EXPECT_EQ(GDF_COLUMN_SIZE_MISMATCH, gdf_column_concat(output_gdf_col.get(), input_columns, num_columns));
}
TEST_F(ColumnConcatTest, RandomData) {
gdf_size_type column_size = 1005;
gdf_size_type null_interval = 17;
std::vector<size_t> column_sizes{column_size, column_size, column_size};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
[null_interval](gdf_size_type row, gdf_size_type col) {
return (row % null_interval) != 0;
});
}
TEST_F(ColumnConcatTest, DifferentLengthColumns) {
gdf_size_type null_interval = 2;
std::vector<size_t> column_sizes{13, 3, 5};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
[null_interval](gdf_size_type row, gdf_size_type col) {
return (row % null_interval) != 0;
});
}
TEST_F(ColumnConcatTest, DifferentLengthColumnsLimitedBits) {
std::vector<size_t> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](gdf_size_type row, gdf_size_type col){
return row < column_sizes[col];
};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
limited_bits);
}
TEST_F(ColumnConcatTest, MoreComplicatedColumns) {
std::vector<size_t> column_sizes{5, 1003, 17, 117};
auto bit_setter = [column_sizes](gdf_size_type row, gdf_size_type col) {
switch (col) {
case 0:
return (row % 2) != 0; // column 0 has odd bits set
case 1:
return row < column_sizes[col];
case 2:
return (row % 17) != 0;
case 3:
return row < 3;
}
return true;
};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
TEST_F(ColumnConcatTest, EightByteColumns) {
std::vector<size_t> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](gdf_size_type row, gdf_size_type col){
return row < column_sizes[col];
};
multicolumn_test<int64_t>(column_sizes,
[](int index){ return std::rand(); },
limited_bits);
}
#ifdef ENABLE_CONCAT_BENCHMARK
TEST_F(ColumnConcatTest, Benchmark) {
size_t n = 42000000;
std::vector<size_t> column_sizes{n, n, n, n};
gdf_size_type null_interval = 17;
auto bit_setter = [null_interval](gdf_size_type row, gdf_size_type col) {
return (row % null_interval) != 0;
};
multicolumn_bench<int>(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
#endif // ENABLE_CONCAT_BENCHMARK
|
cad8251f06a6464ce111503e9bf7dcbf3ce9c031.cu
|
#include "gtest/gtest.h"
#include <cstdlib>
#include <iostream>
#include <vector>
#include <chrono>
#include <thrust/device_vector.h>
#include "gtest/gtest.h"
#include <cudf.h>
#include <utilities/cudf_utils.h>
#include <cudf/functions.h>
#include "tests/utilities/cudf_test_utils.cuh"
// uncomment to enable benchmarking gdf_column_concat
//#define ENABLE_CONCAT_BENCHMARK
template <typename T>
struct print {
__device__ void operator()(T x) { printf("%x ", x); }
};
struct ColumnConcatTest : public testing::Test
{
ColumnConcatTest() {}
~ColumnConcatTest() {}
template <typename T, typename data_initializer_t, typename null_initializer_t>
void multicolumn_test(std::vector<size_t> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<T> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
// This is just an alias to a gdf_column with a custom deleter that will free
// the data and valid fields when the unique_ptr goes out of scope
using gdf_col_pointer = typename std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>;
// Copies the random data from each host vector in the_columns to the device in a gdf_column.
// Each gdf_column's validity bit i will be initialized with the lambda
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
gdf_size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<T> output_data(total_size);
std::vector<gdf_valid_type> output_valid(gdf_get_num_chars_bitmask(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
// make a concatenated reference
std::vector<T> ref_data;
for (size_t i = 0; i < the_columns.size(); ++i)
std::copy(the_columns[i].begin(), the_columns[i].end(), std::back_inserter(ref_data));
gdf_size_type ref_null_count = 0;
std::vector<gdf_valid_type> ref_valid(gdf_get_num_chars_bitmask(total_size));
for (gdf_size_type index = 0, col = 0, row = 0; index < total_size; ++index)
{
if (null_init(row, col)) gdf::util::turn_bit_on(ref_valid.data(), index);
else ref_null_count++;
if (++row >= column_sizes[col]) { row = 0; col++; }
}
auto ref_gdf_col = create_gdf_column(ref_data, ref_valid);
EXPECT_EQ(ref_null_count, ref_gdf_col->null_count);
EXPECT_TRUE(gdf_equal_columns<int>(ref_gdf_col.get(), output_gdf_col.get()));
//print_valid_data(ref_valid.data(), total_size); printf("\n");
//print_valid_data(output_gdf_col->valid, total_size);
}
template <typename T, typename data_initializer_t, typename null_initializer_t>
void multicolumn_bench(std::vector<size_t> column_sizes,
data_initializer_t data_init,
null_initializer_t null_init)
{
std::vector< std::vector<T> > the_columns(column_sizes.size());
for (size_t i = 0; i < column_sizes.size(); ++i)
initialize_vector(the_columns[i], column_sizes[i], data_init);
std::vector<gdf_col_pointer> gdf_columns = initialize_gdf_columns(the_columns, null_init);
std::vector<gdf_column*> raw_gdf_columns;
for(auto const & c : gdf_columns) {
raw_gdf_columns.push_back(c.get());
}
gdf_column **columns_to_concat = raw_gdf_columns.data();
int num_columns = raw_gdf_columns.size();
gdf_size_type total_size = 0;
for (auto sz : column_sizes) total_size += sz;
std::vector<int32_t> output_data(total_size);
std::vector<gdf_valid_type> output_valid(gdf_get_num_chars_bitmask(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
auto start = std::chrono::high_resolution_clock::now();
EXPECT_EQ( GDF_SUCCESS, gdf_column_concat(output_gdf_col.get(),
columns_to_concat,
num_columns) );
int num = 100;
for (int i = 0; i < num; ++i) {
gdf_column_concat(output_gdf_col.get(), columns_to_concat, num_columns);
}
cudaDeviceSynchronize();
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end-start;
std::cout << "Time for " << num << " concats of " << num_columns << " columns of "
<< total_size << " total elements:\n";
std::cout << diff.count() << " s\n";
}
};
// Test various cases with null pointers or empty columns
TEST_F(ColumnConcatTest, ErrorConditions)
{
constexpr int num_columns = 4;
// Test null output column
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(0, 0, 0));
std::vector<gdf_size_type> column_sizes{4, 1, 2, 3};
gdf_size_type total_size = 0;
for (auto& n : column_sizes)
total_size += n;
gdf_column **input_columns = new gdf_column*[num_columns];
for (int i = 0; i < num_columns; ++i) {
input_columns[i] = 0;
}
std::vector<int32_t> output_data(total_size);
std::vector<gdf_valid_type> output_valid(gdf_get_num_chars_bitmask(total_size));
auto output_gdf_col = create_gdf_column(output_data, output_valid);
// Test array of null input columns
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(output_gdf_col.get(), input_columns, num_columns));
// use storage that outlives the loop so the raw pointers kept in input_columns remain valid
std::vector<gdf_column> cols(num_columns);
for (int i = 0; i < num_columns; ++i) {
EXPECT_EQ(GDF_SUCCESS, gdf_column_view(&cols[i], 0, 0, column_sizes[i], GDF_INT32));
input_columns[i] = &cols[i];
}
// test null input column data / valid pointers
EXPECT_EQ(GDF_DATASET_EMPTY, gdf_column_concat(output_gdf_col.get(), input_columns, num_columns));
// create some actual input columns
// keep the owning smart pointers returned by create_gdf_column alive so the raw pointers stay valid
std::vector<std::unique_ptr<gdf_column, std::function<void(gdf_column*)>>> input_owners;
for (int i = 0; i < num_columns; ++i) {
gdf_size_type size = column_sizes[i];
std::vector<int32_t> data(size);
std::vector<gdf_valid_type> valid(gdf_get_num_chars_bitmask(size));
input_owners.push_back(create_gdf_column(data, valid));
input_columns[i] = input_owners.back().get();
}
// test mismatched sizes
output_gdf_col->size = total_size - 1;
EXPECT_EQ(GDF_COLUMN_SIZE_MISMATCH, gdf_column_concat(output_gdf_col.get(), input_columns, num_columns));
}
TEST_F(ColumnConcatTest, RandomData) {
gdf_size_type column_size = 1005;
gdf_size_type null_interval = 17;
std::vector<size_t> column_sizes{column_size, column_size, column_size};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
[null_interval](gdf_size_type row, gdf_size_type col) {
return (row % null_interval) != 0;
});
}
TEST_F(ColumnConcatTest, DifferentLengthColumns) {
gdf_size_type null_interval = 2;
std::vector<size_t> column_sizes{13, 3, 5};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
[null_interval](gdf_size_type row, gdf_size_type col) {
return (row % null_interval) != 0;
});
}
TEST_F(ColumnConcatTest, DifferentLengthColumnsLimitedBits) {
std::vector<size_t> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](gdf_size_type row, gdf_size_type col){
return row < column_sizes[col];
};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
limited_bits);
}
TEST_F(ColumnConcatTest, MoreComplicatedColumns) {
std::vector<size_t> column_sizes{5, 1003, 17, 117};
auto bit_setter = [column_sizes](gdf_size_type row, gdf_size_type col) {
switch (col) {
case 0:
return (row % 2) != 0; // column 0 has odd bits set
case 1:
return row < column_sizes[col];
case 2:
return (row % 17) != 0;
case 3:
return row < 3;
}
return true;
};
multicolumn_test<int>(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
TEST_F(ColumnConcatTest, EightByteColumns) {
std::vector<size_t> column_sizes{13, 3, 5};
auto limited_bits = [column_sizes](gdf_size_type row, gdf_size_type col){
return row < column_sizes[col];
};
multicolumn_test<int64_t>(column_sizes,
[](int index){ return std::rand(); },
limited_bits);
}
#ifdef ENABLE_CONCAT_BENCHMARK
TEST_F(ColumnConcatTest, Benchmark) {
size_t n = 42000000;
std::vector<size_t> column_sizes{n, n, n, n};
gdf_size_type null_interval = 17;
auto bit_setter = [null_interval](gdf_size_type row, gdf_size_type col) {
return (row % null_interval) != 0;
};
multicolumn_bench<int>(column_sizes,
[](int index){ return std::rand(); },
bit_setter);
}
#endif // ENABLE_CONCAT_BENCHMARK
|
20dedd599c98defd25bd12116613aedff0ea1d4d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_CUDA
namespace lbann {
namespace gpu_lib {
// -------------------------------------------------------------
// Device properties
// -------------------------------------------------------------
dim3 max_grid_dims() {
static dim3 max_grid_dims_(0,0,0);
if (max_grid_dims_.x == 0) {
int device = 0;
hipDeviceProp_t prop;
CHECK_CUDA(hipGetDevice(&device));
CHECK_CUDA(hipGetDeviceProperties(&prop, device));
max_grid_dims_.x = prop.maxGridSize[0];
max_grid_dims_.y = prop.maxGridSize[1];
max_grid_dims_.z = prop.maxGridSize[2];
if (max_grid_dims_.x == 0) {
LBANN_ERROR("Could not setup max CUDA grid size");
}
}
return max_grid_dims_;
}
} // namespace gpu_lib
} // namespace lbann
namespace lbann {
namespace cuda {
// -------------------------------------------------------------
// event_wrapper
// -------------------------------------------------------------
event_wrapper::event_wrapper() : m_event(nullptr), m_stream(0) {
CHECK_CUDA(hipEventCreateWithFlags(&m_event, hipEventDisableTiming));
}
event_wrapper::event_wrapper(const event_wrapper& other)
: m_event(nullptr), m_stream(other.m_stream) {
CHECK_CUDA(hipEventCreateWithFlags(&m_event, hipEventDisableTiming));
if (!other.query()) { record(m_stream); }
}
event_wrapper& event_wrapper::operator=(const event_wrapper& other) {
m_stream = other.m_stream;
if (!other.query()) { record(m_stream); }
return *this;
}
event_wrapper::~event_wrapper() {
hipEventDestroy(m_event);
}
void event_wrapper::record(hipStream_t stream) {
m_stream = stream;
CHECK_CUDA(hipEventRecord(m_event, m_stream));
}
bool event_wrapper::query() const {
const auto& status = hipEventQuery(m_event);
switch (status) {
case hipSuccess: return true;
case hipErrorNotReady: return false;
default:
CHECK_CUDA(status);
return false;
}
}
void event_wrapper::synchronize() {
CHECK_CUDA(hipEventSynchronize(m_event));
}
hipEvent_t& event_wrapper::get_event() { return m_event; }
// -----------------------------
// Graph
// -----------------------------
Graph::Graph(hipGraph_t graph)
: graph_{graph}
{}
Graph::~Graph() {
if (graph_) {
// Don't check status to avoid exceptions
hipGraphDestroy(graph_);
}
}
Graph::Graph(const Graph& other) {
if (other.graph_) {
CHECK_CUDA(hipGraphClone(&graph_, other.graph_));
}
}
Graph::Graph(Graph&& other)
: graph_{other.graph_} {
other.graph_ = nullptr;
}
Graph& Graph::operator=(Graph other) {
swap(other, *this);
return *this;
}
void swap(Graph& first, Graph& second) {
std::swap(first.graph_, second.graph_);
}
void Graph::reset(hipGraph_t graph) {
if (graph_) {
CHECK_CUDA(hipGraphDestroy(graph_));
}
graph_ = graph;
}
hipGraph_t Graph::release() {
auto old_graph = graph_;
graph_ = nullptr;
return old_graph;
}
hipGraph_t Graph::get() const noexcept {
return graph_;
}
Graph::operator hipGraph_t() const noexcept {
return get();
}
void Graph::create() {
if (!graph_) {
CHECK_CUDA(hipGraphCreate(&graph_, 0));
}
}
void Graph::begin_capture(
hipStream_t stream,
hipStreamCaptureMode mode) {
// Check that stream is valid
// Note (tym 9/22/20): As of CUDA 11.0.3, support for stream capture
// on default stream is not supported.
if (stream == 0) {
LBANN_ERROR("attempting to capture default CUDA stream");
}
// Check whether CUDA stream is already being captured
hipStreamCaptureStatus capture_status;
CHECK_CUDA(hipStreamIsCapturing(stream, &capture_status));
switch (capture_status) {
case hipStreamCaptureStatusNone:
break;
case hipStreamCaptureStatusActive:
LBANN_ERROR("CUDA stream is already being captured");
break;
case hipStreamCaptureStatusInvalidated:
{
hipGraph_t graph;
CHECK_CUDA(hipStreamEndCapture(stream, &graph));
Graph temp(graph);
}
break;
default:
LBANN_ERROR(
"unrecognized status for CUDA stream capture ",
"(",static_cast<int>(capture_status),")");
}
// Start capturing CUDA stream
CHECK_CUDA(hipStreamBeginCapture(stream, mode));
}
Graph Graph::end_capture(hipStream_t stream) {
// Check whether CUDA stream is already being captured
hipStreamCaptureStatus capture_status;
CHECK_CUDA(hipStreamIsCapturing(stream, &capture_status));
switch (capture_status) {
case hipStreamCaptureStatusNone:
LBANN_ERROR("CUDA stream is not being captured");
break;
case hipStreamCaptureStatusActive:
break;
case hipStreamCaptureStatusInvalidated:
{
hipGraph_t graph;
CHECK_CUDA(hipStreamEndCapture(stream, &graph));
Graph temp(graph);
LBANN_ERROR("CUDA stream capture has failed");
}
break;
default:
LBANN_ERROR(
"unrecognized status for CUDA stream capture ",
"(",static_cast<int>(capture_status),")");
}
// Finish capturing CUDA stream
hipGraph_t graph;
CHECK_CUDA(hipStreamEndCapture(stream, &graph));
return Graph(graph);
}
// -----------------------------
// ExecutableGraph
// -----------------------------
ExecutableGraph::ExecutableGraph(hipGraphExec_t graph_exec)
: graph_exec_{graph_exec}
{}
ExecutableGraph::ExecutableGraph(hipGraph_t graph) {
if (!graph) {
LBANN_ERROR("attempted to instantiate hipGraphExec_t from null hipGraph_t object");
}
constexpr size_t log_size = BUFSIZ;
char log_buffer[log_size];
const auto status
= hipGraphInstantiate(&graph_exec_, graph, nullptr, log_buffer, log_size);
if (status != hipSuccess && log_buffer[0] != '\0') {
log_buffer[log_size-1] = '\0';
LBANN_WARNING(log_buffer);
}
CHECK_CUDA(status);
}
ExecutableGraph::~ExecutableGraph() {
if (graph_exec_) {
// Don't check status to avoid exceptions
hipGraphExecDestroy(graph_exec_);
}
}
ExecutableGraph::ExecutableGraph(ExecutableGraph&& other)
: graph_exec_{other.graph_exec_} {
other.graph_exec_ = nullptr;
}
ExecutableGraph& ExecutableGraph::operator=(ExecutableGraph other) {
swap(other, *this);
return *this;
}
void swap(ExecutableGraph& first, ExecutableGraph& second) {
std::swap(first.graph_exec_, second.graph_exec_);
}
void ExecutableGraph::reset(hipGraphExec_t graph_exec) {
if (graph_exec_) {
CHECK_CUDA(hipGraphExecDestroy(graph_exec_));
}
graph_exec_ = graph_exec;
}
hipGraphExec_t ExecutableGraph::release() {
auto old_graph_exec = graph_exec_;
graph_exec_ = nullptr;
return old_graph_exec;
}
hipGraphExec_t ExecutableGraph::get() const noexcept {
return graph_exec_;
}
ExecutableGraph::operator hipGraphExec_t() const noexcept {
return get();
}
void ExecutableGraph::launch(hipStream_t stream) const {
if (!graph_exec_) {
LBANN_ERROR("attempted to launch null hipGraphExec_t");
}
CHECK_CUDA(hipGraphLaunch(graph_exec_, stream));
}
void ExecutableGraph::update(hipGraph_t graph) {
// Make sure CUDA graph is valid
if (!graph) {
LBANN_ERROR("attempting to update hipGraphExec_t with null hipGraph_t");
}
// Try updating executable CUDA graph
#if (__CUDACC_VER_MAJOR__*100+__CUDACC_VER_MINOR__) < 1002 // < 10.2
reset();
#else // >= 10.2
if (graph_exec_) {
hipGraphNode_t error_node;
hipGraphExecUpdateResult result;
auto status = hipGraphExecUpdate(graph_exec_, graph, &error_node, &result);
switch (status) {
case hipSuccess:
break;
case hipErrorGraphExecUpdateFailure:
reset();
break;
default:
CHECK_CUDA(status);
reset();
}
}
#endif // CUDA version >= 10.02
// If update failed, create new executable CUDA graph
if (!graph_exec_) {
*this = ExecutableGraph(graph);
}
}
// -------------------------------------------------------------
// Helper functions for tensor operations
// -------------------------------------------------------------
namespace {
using int4 = gpu_lib::array<int, 4>;
/**
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (dim[3] / bdimx) x (dim[2] / bdimy) x (dim[1] / bdimx)
*/
template <typename TensorDataType>
__global__ void copy_4d_kernel(
int4 dims,
const TensorDataType* __restrict__ input,
int4 input_strides,
TensorDataType* __restrict__ output,
int4 output_strides) {
// Indices
const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y;
const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z;
const auto& nthreadsx = gridDim.x * blockDim.x;
const auto& nthreadsy = gridDim.y * blockDim.y;
const auto& nthreadsz = gridDim.z * blockDim.z;
for (int i0=0; i0<dims[0]; ++i0) {
for (int i1=gidz; i1<dims[1]; i1+=nthreadsz) {
for (int i2=gidy; i2<dims[2]; i2+=nthreadsy) {
for (int i3=gidx; i3<dims[3]; i3+=nthreadsx) {
const auto& x = input[i0 * input_strides[0]
+ i1 * input_strides[1]
+ i2 * input_strides[2]
+ i3 * input_strides[3]];
auto& y = output[i0 * output_strides[0]
+ i1 * output_strides[1]
+ i2 * output_strides[2]
+ i3 * output_strides[3]];
y = x;
}
}
}
}
}
} // namespace <anon>
template <typename TensorDataType>
void copy_tensor(
hipStream_t stream,
const std::vector<size_t>& dims,
const TensorDataType* input,
const std::vector<size_t>& input_strides,
TensorDataType* output,
const std::vector<size_t>& output_strides) {
// Check inputs
if (dims.empty() || dims.size() > 4) {
LBANN_ERROR("invalid number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != input_strides.size()) {
LBANN_ERROR(
"number of input strides (",input_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != output_strides.size()) {
LBANN_ERROR(
"number of output strides (",output_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
// Pad tensor dimensions to 4D
std::vector<int>
rdims(dims.rbegin(), dims.rend()),
input_rstrides(input_strides.rbegin(), input_strides.rend()),
output_rstrides(output_strides.rbegin(), output_strides.rend());
rdims.resize(4, 1);
input_rstrides.resize(4, input_rstrides.back());
output_rstrides.resize(4, output_rstrides.back());
// Launch CUDA kernel
const auto size = std::accumulate(
dims.begin(), dims.end(), 1, std::multiplies<int>());
if (size > 0) {
constexpr size_t block_size = 64;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
block_dims.z = 1;
grid_dims.x = (rdims[0] + block_dims.x - 1) / block_dims.x;
grid_dims.y = (rdims[1] + block_dims.y - 1) / block_dims.y;
grid_dims.z = (rdims[2] + block_dims.z - 1) / block_dims.z;
grid_dims.y = El::Min(grid_dims.y, 65535);
grid_dims.z = El::Min(grid_dims.z, 65535);
hipLaunchKernelGGL(( copy_4d_kernel), dim3(grid_dims), dim3(block_dims), 0, stream,
{rdims[3], rdims[2], rdims[1], rdims[0]},
input,
{input_rstrides[3], input_rstrides[2],
input_rstrides[1], input_rstrides[0]},
output,
{output_rstrides[3], output_rstrides[2],
output_rstrides[1], output_rstrides[0]});
}
}
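/*
* Illustrative sketch only (not part of the original file): copying a contiguous
* row-major 3 x 4 matrix into the left half of a row-major 3 x 8 buffer could be
* expressed with the helper defined above as
*   lbann::cuda::copy_tensor<float>(stream,
*                                   {3, 4},        // dims
*                                   src, {4, 1},   // input strides (contiguous rows)
*                                   dst, {8, 1});  // output strides (row stride 8 in the destination)
* where stream, src and dst are assumed to be set up by the caller.
*/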
#if defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
template <>
void copy_tensor<cpu_fp16>(
hipStream_t stream,
const std::vector<size_t>& dims,
const cpu_fp16* input,
const std::vector<size_t>& input_strides,
cpu_fp16* output,
const std::vector<size_t>& output_strides) {
copy_tensor<fp16>(
stream,
dims,
reinterpret_cast<const fp16*>(input),
input_strides,
reinterpret_cast<fp16*>(output),
output_strides);
}
#endif // defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
// Explicit template instantiation
#define PROTO(T) \
template void copy_tensor<T>( \
hipStream_t stream, \
const std::vector<size_t>& dims, \
const T* input, \
const std::vector<size_t>& input_strides, \
T* output, \
const std::vector<size_t>& output_strides);
#define LBANN_INSTANTIATE_GPU_HALF
#define LBANN_INSTANTIATE_CPU_HALF
#include "lbann/macros/instantiate.hpp"
#undef PROTO
void mem_copy_async(
void* output,
const void* input,
const size_t count,
hipMemcpyKind kind,
hipStream_t stream) {
CHECK_CUDA(hipMemcpyAsync(
output,
input,
count,
kind,
stream));
}
} // namespace cuda
} // namespace lbann
#endif // LBANN_HAS_CUDA
|
20dedd599c98defd25bd12116613aedff0ea1d4d.cu
|
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/utils/gpu/helpers.hpp"
#ifdef LBANN_HAS_CUDA
namespace lbann {
namespace gpu_lib {
// -------------------------------------------------------------
// Device properties
// -------------------------------------------------------------
dim3 max_grid_dims() {
static dim3 max_grid_dims_(0,0,0);
if (max_grid_dims_.x == 0) {
int device = 0;
cudaDeviceProp prop;
CHECK_CUDA(cudaGetDevice(&device));
CHECK_CUDA(cudaGetDeviceProperties(&prop, device));
max_grid_dims_.x = prop.maxGridSize[0];
max_grid_dims_.y = prop.maxGridSize[1];
max_grid_dims_.z = prop.maxGridSize[2];
if (max_grid_dims_.x == 0) {
LBANN_ERROR("Could not setup max CUDA grid size");
}
}
return max_grid_dims_;
}
} // namespace gpu_lib
} // namespace lbann
namespace lbann {
namespace cuda {
// -------------------------------------------------------------
// event_wrapper
// -------------------------------------------------------------
event_wrapper::event_wrapper() : m_event(nullptr), m_stream(0) {
CHECK_CUDA(cudaEventCreateWithFlags(&m_event, cudaEventDisableTiming));
}
event_wrapper::event_wrapper(const event_wrapper& other)
: m_event(nullptr), m_stream(other.m_stream) {
CHECK_CUDA(cudaEventCreateWithFlags(&m_event, cudaEventDisableTiming));
if (!other.query()) { record(m_stream); }
}
event_wrapper& event_wrapper::operator=(const event_wrapper& other) {
m_stream = other.m_stream;
if (!other.query()) { record(m_stream); }
return *this;
}
event_wrapper::~event_wrapper() {
cudaEventDestroy(m_event);
}
void event_wrapper::record(cudaStream_t stream) {
m_stream = stream;
CHECK_CUDA(cudaEventRecord(m_event, m_stream));
}
bool event_wrapper::query() const {
const auto& status = cudaEventQuery(m_event);
switch (status) {
case cudaSuccess: return true;
case cudaErrorNotReady: return false;
default:
CHECK_CUDA(status);
return false;
}
}
void event_wrapper::synchronize() {
CHECK_CUDA(cudaEventSynchronize(m_event));
}
cudaEvent_t& event_wrapper::get_event() { return m_event; }
// -----------------------------
// Graph
// -----------------------------
Graph::Graph(cudaGraph_t graph)
: graph_{graph}
{}
Graph::~Graph() {
if (graph_) {
// Don't check status to avoid exceptions
cudaGraphDestroy(graph_);
}
}
Graph::Graph(const Graph& other) {
if (other.graph_) {
CHECK_CUDA(cudaGraphClone(&graph_, other.graph_));
}
}
Graph::Graph(Graph&& other)
: graph_{other.graph_} {
other.graph_ = nullptr;
}
Graph& Graph::operator=(Graph other) {
swap(other, *this);
return *this;
}
void swap(Graph& first, Graph& second) {
std::swap(first.graph_, second.graph_);
}
void Graph::reset(cudaGraph_t graph) {
if (graph_) {
CHECK_CUDA(cudaGraphDestroy(graph_));
}
graph_ = graph;
}
cudaGraph_t Graph::release() {
auto old_graph = graph_;
graph_ = nullptr;
return old_graph;
}
cudaGraph_t Graph::get() const noexcept {
return graph_;
}
Graph::operator cudaGraph_t() const noexcept {
return get();
}
void Graph::create() {
if (!graph_) {
CHECK_CUDA(cudaGraphCreate(&graph_, 0));
}
}
void Graph::begin_capture(
cudaStream_t stream,
cudaStreamCaptureMode mode) {
// Check that stream is valid
// Note (tym 9/22/20): As of CUDA 11.0.3, support for stream capture
// on default stream is not supported.
if (stream == 0) {
LBANN_ERROR("attempting to capture default CUDA stream");
}
// Check whether CUDA stream is already being captured
cudaStreamCaptureStatus capture_status;
CHECK_CUDA(cudaStreamIsCapturing(stream, &capture_status));
switch (capture_status) {
case cudaStreamCaptureStatusNone:
break;
case cudaStreamCaptureStatusActive:
LBANN_ERROR("CUDA stream is already being captured");
break;
case cudaStreamCaptureStatusInvalidated:
{
cudaGraph_t graph;
CHECK_CUDA(cudaStreamEndCapture(stream, &graph));
Graph temp(graph);
}
break;
default:
LBANN_ERROR(
"unrecognized status for CUDA stream capture ",
"(",static_cast<int>(capture_status),")");
}
// Start capturing CUDA stream
CHECK_CUDA(cudaStreamBeginCapture(stream, mode));
}
Graph Graph::end_capture(cudaStream_t stream) {
// Check whether CUDA stream is already being captured
cudaStreamCaptureStatus capture_status;
CHECK_CUDA(cudaStreamIsCapturing(stream, &capture_status));
switch (capture_status) {
case cudaStreamCaptureStatusNone:
LBANN_ERROR("CUDA stream is not being captured");
break;
case cudaStreamCaptureStatusActive:
break;
case cudaStreamCaptureStatusInvalidated:
{
cudaGraph_t graph;
CHECK_CUDA(cudaStreamEndCapture(stream, &graph));
Graph temp(graph);
LBANN_ERROR("CUDA stream capture has failed");
}
break;
default:
LBANN_ERROR(
"unrecognized status for CUDA stream capture ",
"(",static_cast<int>(capture_status),")");
}
// Finish capturing CUDA stream
cudaGraph_t graph;
CHECK_CUDA(cudaStreamEndCapture(stream, &graph));
return Graph(graph);
}
// -----------------------------
// ExecutableGraph
// -----------------------------
ExecutableGraph::ExecutableGraph(cudaGraphExec_t graph_exec)
: graph_exec_{graph_exec}
{}
ExecutableGraph::ExecutableGraph(cudaGraph_t graph) {
if (!graph) {
LBANN_ERROR("attempted to instantiate cudaGraphExec_t from null cudaGraph_t object");
}
constexpr size_t log_size = BUFSIZ;
char log_buffer[log_size];
const auto status
= cudaGraphInstantiate(&graph_exec_, graph, nullptr, log_buffer, log_size);
if (status != cudaSuccess && log_buffer[0] != '\0') {
log_buffer[log_size-1] = '\0';
LBANN_WARNING(log_buffer);
}
CHECK_CUDA(status);
}
ExecutableGraph::~ExecutableGraph() {
if (graph_exec_) {
// Don't check status to avoid exceptions
cudaGraphExecDestroy(graph_exec_);
}
}
ExecutableGraph::ExecutableGraph(ExecutableGraph&& other)
: graph_exec_{other.graph_exec_} {
other.graph_exec_ = nullptr;
}
ExecutableGraph& ExecutableGraph::operator=(ExecutableGraph other) {
swap(other, *this);
return *this;
}
void swap(ExecutableGraph& first, ExecutableGraph& second) {
std::swap(first.graph_exec_, second.graph_exec_);
}
void ExecutableGraph::reset(cudaGraphExec_t graph_exec) {
if (graph_exec_) {
CHECK_CUDA(cudaGraphExecDestroy(graph_exec_));
}
graph_exec_ = graph_exec;
}
cudaGraphExec_t ExecutableGraph::release() {
auto old_graph_exec = graph_exec_;
graph_exec_ = nullptr;
return old_graph_exec;
}
cudaGraphExec_t ExecutableGraph::get() const noexcept {
return graph_exec_;
}
ExecutableGraph::operator cudaGraphExec_t() const noexcept {
return get();
}
void ExecutableGraph::launch(cudaStream_t stream) const {
if (!graph_exec_) {
LBANN_ERROR("attempted to launch null cudaGraphExec_t");
}
CHECK_CUDA(cudaGraphLaunch(graph_exec_, stream));
}
void ExecutableGraph::update(cudaGraph_t graph) {
// Make sure CUDA graph is valid
if (!graph) {
LBANN_ERROR("attempting to update cudaGraphExec_t with null cudaGraph_t");
}
// Try updating executable CUDA graph
#if (__CUDACC_VER_MAJOR__*100+__CUDACC_VER_MINOR__) < 1002 // < 10.2
reset();
#else // >= 10.2
if (graph_exec_) {
cudaGraphNode_t error_node;
cudaGraphExecUpdateResult result;
auto status = cudaGraphExecUpdate(graph_exec_, graph, &error_node, &result);
switch (status) {
case cudaSuccess:
break;
case cudaErrorGraphExecUpdateFailure:
reset();
break;
default:
CHECK_CUDA(status);
reset();
}
}
#endif // CUDA version >= 10.02
// If update failed, create new executable CUDA graph
if (!graph_exec_) {
*this = ExecutableGraph(graph);
}
}
// -------------------------------------------------------------
// Helper functions for tensor operations
// -------------------------------------------------------------
namespace {
using int4 = gpu_lib::array<int, 4>;
/**
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (dim[3] / bdimx) x (dim[2] / bdimy) x (dim[1] / bdimx)
*/
template <typename TensorDataType>
__global__ void copy_4d_kernel(
int4 dims,
const TensorDataType* __restrict__ input,
int4 input_strides,
TensorDataType* __restrict__ output,
int4 output_strides) {
// Indices
const auto& gidx = threadIdx.x + blockIdx.x * blockDim.x;
const auto& gidy = threadIdx.y + blockIdx.y * blockDim.y;
const auto& gidz = threadIdx.z + blockIdx.z * blockDim.z;
const auto& nthreadsx = gridDim.x * blockDim.x;
const auto& nthreadsy = gridDim.y * blockDim.y;
const auto& nthreadsz = gridDim.z * blockDim.z;
for (int i0=0; i0<dims[0]; ++i0) {
for (int i1=gidz; i1<dims[1]; i1+=nthreadsz) {
for (int i2=gidy; i2<dims[2]; i2+=nthreadsy) {
for (int i3=gidx; i3<dims[3]; i3+=nthreadsx) {
const auto& x = input[i0 * input_strides[0]
+ i1 * input_strides[1]
+ i2 * input_strides[2]
+ i3 * input_strides[3]];
auto& y = output[i0 * output_strides[0]
+ i1 * output_strides[1]
+ i2 * output_strides[2]
+ i3 * output_strides[3]];
y = x;
}
}
}
}
}
} // namespace <anon>
template <typename TensorDataType>
void copy_tensor(
cudaStream_t stream,
const std::vector<size_t>& dims,
const TensorDataType* input,
const std::vector<size_t>& input_strides,
TensorDataType* output,
const std::vector<size_t>& output_strides) {
// Check inputs
if (dims.empty() || dims.size() > 4) {
LBANN_ERROR("invalid number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != input_strides.size()) {
LBANN_ERROR(
"number of input strides (",input_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
if (dims.size() != output_strides.size()) {
LBANN_ERROR(
"number of output strides (",output_strides.size(),") ",
"does not match number of tensor dimensions (",dims.size(),")");
}
// Pad tensor dimensions to 4D
std::vector<int>
rdims(dims.rbegin(), dims.rend()),
input_rstrides(input_strides.rbegin(), input_strides.rend()),
output_rstrides(output_strides.rbegin(), output_strides.rend());
rdims.resize(4, 1);
input_rstrides.resize(4, input_rstrides.back());
output_rstrides.resize(4, output_rstrides.back());
// Launch CUDA kernel
const auto size = std::accumulate(
dims.begin(), dims.end(), 1, std::multiplies<int>());
if (size > 0) {
constexpr size_t block_size = 64;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
block_dims.y = 1;
block_dims.z = 1;
grid_dims.x = (rdims[0] + block_dims.x - 1) / block_dims.x;
grid_dims.y = (rdims[1] + block_dims.y - 1) / block_dims.y;
grid_dims.z = (rdims[2] + block_dims.z - 1) / block_dims.z;
grid_dims.y = El::Min(grid_dims.y, 65535);
grid_dims.z = El::Min(grid_dims.z, 65535);
copy_4d_kernel<<<grid_dims, block_dims, 0, stream>>>(
{rdims[3], rdims[2], rdims[1], rdims[0]},
input,
{input_rstrides[3], input_rstrides[2],
input_rstrides[1], input_rstrides[0]},
output,
{output_rstrides[3], output_rstrides[2],
output_rstrides[1], output_rstrides[0]});
}
}
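// Illustrative usage sketch (added for clarity; the device buffers and stream
// below are assumptions): copy a packed 2x3x4x5 tensor. Strides are listed in
// the same outer-to-inner order as dims, with the innermost stride equal to 1.
//
//   const std::vector<size_t> dims = {2, 3, 4, 5};
//   const std::vector<size_t> packed_strides = {3*4*5, 4*5, 5, 1}; // {60,20,5,1}
//   copy_tensor<float>(stream, dims, d_input, packed_strides,
//                      d_output, packed_strides);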
#if defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
template <>
void copy_tensor<cpu_fp16>(
cudaStream_t stream,
const std::vector<size_t>& dims,
const cpu_fp16* input,
const std::vector<size_t>& input_strides,
cpu_fp16* output,
const std::vector<size_t>& output_strides) {
copy_tensor<fp16>(
stream,
dims,
reinterpret_cast<const fp16*>(input),
input_strides,
reinterpret_cast<fp16*>(output),
output_strides);
}
#endif // defined(LBANN_HAS_HALF) && defined(LBANN_HAS_GPU_HALF)
// Explicit template instantiation
#define PROTO(T) \
template void copy_tensor<T>( \
cudaStream_t stream, \
const std::vector<size_t>& dims, \
const T* input, \
const std::vector<size_t>& input_strides, \
T* output, \
const std::vector<size_t>& output_strides);
#define LBANN_INSTANTIATE_GPU_HALF
#define LBANN_INSTANTIATE_CPU_HALF
#include "lbann/macros/instantiate.hpp"
#undef PROTO
void mem_copy_async(
void* output,
const void* input,
const size_t count,
cudaMemcpyKind kind,
cudaStream_t stream) {
CHECK_CUDA(cudaMemcpyAsync(
output,
input,
count,
kind,
stream));
}
} // namespace cuda
} // namespace lbann
#endif // LBANN_HAS_CUDA
|
3953cf0063ddbaaf9ec024b5b87c6a5159fb3975.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2017 by Contributors
* \file deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, and dilation.
* These functions are mainly used in convolution operators.
* The implementation of the im2col and col2im algorithms
* are copied from Caffe with minor interface modifications
* adapting to MXNet data structures.
*/
#ifndef TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
#define TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
// #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "deform_conv.h"
#include "hip/hip_runtime.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/logging.h"
#include <algorithm>
#include <cstring>
#include <vector>
namespace tensorflow {
typedef Eigen::GpuDevice GPUDevice;
typedef std::vector<int32> TShape;
// fetch value from bottom_data(1D array), using subscript (h, w)
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType* bottom_data, const int data_width,
const int height, const int width, DType h, DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (DType)h_low;
}
else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (DType)w_low;
}
else {
w_high = w_low + 1;
}
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = bottom_data[h_low * data_width + w_low];
DType v2 = bottom_data[h_low * data_width + w_high];
DType v3 = bottom_data[h_high * data_width + w_low];
DType v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
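// Worked example (added for clarity): for h = 1.3, w = 2.7 the code above gives
// lh = 0.3, lw = 0.7, hh = 0.7, hw = 0.3, so the corner weights are
// w1 = 0.21, w2 = 0.49, w3 = 0.09, w4 = 0.21, which sum to 1 as expected for
// bilinear interpolation.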
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
//empty
return 0;
}
argmax_h = max(argmax_h, (DType)0.0f);
argmax_w = max(argmax_w, (DType)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1)
{
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const int height, const int width, const DType* im_data,
const int data_width, const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width)
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
* \brief im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(const int n, const DType* data_im, const DType* data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
DType* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const DType map_h = i * dilation_h + offset_h;
const DType map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
/*!
* \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(const int n, const DType* data_col, const DType* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
DType* grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!
* \brief DO NOT call this directly. Use wrapper function deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType* data_col,
const DType* data_im, const DType* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
DType* grad_offset) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col;
const DType* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const DType weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!
* \brief im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void im2col_gpu_kernel(const int n, const DType* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
DType* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : static_cast<DType>(0);
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename DType>
__global__ void pureAddToKernel(const int n, DType* result_data, const DType* right_data)
{
CUDA_1D_KERNEL_LOOP(index, n) {
CudaAtomicAdd(result_data+index, right_data[index]);
}
}
template <typename DType>
__global__ void pureSubToKernel(const int n, DType* result_data, const DType* right_data)
{
CUDA_1D_KERNEL_LOOP(index, n) {
CudaAtomicAdd(result_data+index, -right_data[index]);
}
}
template <typename DType>
__global__ void setZeroKernel(const int n, DType* result_data)
{
CUDA_1D_KERNEL_LOOP(index, n) {
*(result_data+index)=DType(0);
}
}
namespace functor {
inline int ProdShape(const TShape &shape, int start);
/*!\brief im2col gpu version
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param col_shape column buffer shape (#channels, output_im_height, output_im_width, ...)
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param data_col column buffer pointer
*/
template <typename DType>
struct deformable_im2col<GPUDevice, DType>{
void operator()(const GPUDevice& d,
const DType* data_im, const DType* data_offset,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride, const TShape& dilation,
const int deformable_group, DType* data_col) {
// num_axes should be smaller than block size
int num_spatial_axes = kernel_shape.size();
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = im_shape[1] * ProdShape(col_shape, 1);
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// num_spatial_axes 2 channel_per_deformable_group 6 num_kernels 72 col_shape 24 3 4 0 im_shape8 6 im_shape4 5 kernel_shape 2 2 pad 0 0
// num_spatial_axes 2 channel_per_deformable_group 6 num_kernels 72 col_shape 24 3 4 0 im_shape8 6 config 1 256
// LOG(INFO) << "num_spatial_axes " << num_spatial_axes << " channel_per_deformable_group " << channel_per_deformable_group
// << " num_kernels " << num_kernels << " col_shape " << col_shape[0]<<" " << col_shape[1] <<" " << col_shape[2]<<" " << col_shape[3]<<" "
// << "im_shape" <<im_shape[0] << " " << im_shape[1] << " \n" << "config "<<config.block_count<<" "<< config.thread_per_block;
// LOG(INFO) <<" im_shape"<<im_shape[2]<<" "<<im_shape[3]<<" kernel_shape "<<kernel_shape[0]<<" "<<kernel_shape[1]<<" pad "<<pad[0]<<" "<<pad[1];
// LOG(INFO) <<" dilation "<<dilation[0]<<" "<<dilation[1];
switch (num_spatial_axes) {
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<DType>), dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream(),
num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], col_shape[2], data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
};
inline int ProdShape(const TShape &shape, int start) {
int64 res = 1;
for(int i=start; i<shape.size(); i++) {
res*=shape[i];
}
return res;
}
/*!\brief
* gpu function of col2im algorithm
* \param s GPUDevice stream
* \param data_col start pointer of the column buffer to be filled
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
 * \param data_im pointer of an image (C, H, W,...) in the image batch
*/
template <typename DType>
struct deformable_col2im<GPUDevice, DType>{
void operator()(const GPUDevice& d,
const DType* data_col, const DType* data_offset,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, const int deformable_group,
DType* grad_im) {
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1);
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0);
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// 6 4 52 2 0 0 2 21 1 6 2 2
// 6 4 52 2 0 0 2 21 1 6 2 2
// LOG(INFO) << im_shape[1]<<' '<<im_shape[2]<<' '<<im_shape[3]<<
// kernel_shape[0]<<' '<<kernel_shape[1]<<' '<<pad[0]<<' '<<pad[1]<<' '<<stride[0]<<' '<<stride[1]<<
// dilation[0]<<' '<<dilation[1]<<' '<<channel_per_deformable_group<<' '<<col_shape[1]<<' '<<col_shape[2];
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<DType>), dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream(),
num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], col_shape[2], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
};
template <typename DType>
struct deformable_col2im_coord<GPUDevice, DType>{
void operator() (const GPUDevice& d,
const DType* data_col, const DType* data_im, const DType* data_offset, const TShape& im_shape,
const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, const int deformable_group, DType* grad_offset) {
size_t num_spatial_axes = kernel_shape.size();
size_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
size_t channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel<DType>), dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream(),
num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], col_shape[2], grad_offset);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes";
}
}
};
/*!\brief im2col gpu version
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param col_shape column buffer shape (#channels, output_im_height, output_im_width, ...)
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param data_col column buffer pointer
*/
template <typename DType>
struct im2col<GPUDevice, DType>{
void operator() (const GPUDevice& d,
const DType* data_im, const TShape& im_shape,
const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, DType* data_col) {
// num_axes should be smaller than block size
int num_spatial_axes = kernel_shape.size();
int num_kernels = im_shape[1] * ProdShape(col_shape, 1);
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<DType>), dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream(),
num_kernels, data_im, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1],
col_shape[1], col_shape[2], data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
};
template <typename DType>
struct pureAddTo<GPUDevice, DType>{
void operator() (const GPUDevice& d, const int n, DType* result_data, const DType* right_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
hipLaunchKernelGGL(( pureAddToKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , n, result_data, right_data);
}
};
template <typename DType>
struct pureSubTo<GPUDevice, DType>{
void operator() (const GPUDevice& d, const int n, DType* result_data, const DType* right_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
hipLaunchKernelGGL(( pureSubToKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , n, result_data, right_data);
}
};
template <typename DType>
struct setZero<GPUDevice, DType>{
void operator() (const GPUDevice& d, const int n, DType* result_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
hipLaunchKernelGGL(( setZeroKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , n, result_data);
}
};
} // namespace functor
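// Host-side usage sketch (illustrative only; the op context, device buffers and
// shapes below are assumptions, not taken from this file):
//
//   const GPUDevice& d = context->eigen_device<GPUDevice>();
//   TShape im_shape = {1, 8, 6, 6};          // N, C, H, W
//   TShape col_shape = {8 * 2 * 2, 5, 5};    // C*kh*kw, H_col, W_col
//   TShape kernel = {2, 2}, pad = {0, 0}, stride = {1, 1}, dilation = {1, 1};
//   functor::deformable_im2col<GPUDevice, float>()(
//       d, d_im, d_offset, im_shape, col_shape, kernel, pad, stride, dilation,
//       /*deformable_group=*/1, d_col);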
#define DECLARE_GPU_SPEC(DType) \
template struct functor::deformable_im2col<GPUDevice, DType>; \
template struct functor::deformable_col2im<GPUDevice, DType>; \
template struct functor::deformable_col2im_coord<GPUDevice, DType>; \
template struct functor::pureAddTo<GPUDevice, DType>; \
template struct functor::pureSubTo<GPUDevice, DType>; \
template struct functor::setZero<GPUDevice, DType>; \
template struct functor::im2col<GPUDevice, DType>;
// extern template struct Copy<GPUDevice, T>;
TF_CALL_float(DECLARE_GPU_SPEC);
TF_CALL_double(DECLARE_GPU_SPEC);
// TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC);
#undef DECLARE_GPU_SPEC
} // namespace tensorflow
// #endif // GOOGLE_CUDA
#endif // TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
|
3953cf0063ddbaaf9ec024b5b87c6a5159fb3975.cu
|
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2017 by Contributors
* \file deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, and dilation.
* These functions are mainly used in convolution operators.
* The implementation of the im2col and col2im algorithms
* are copied from Caffe with minor interface modifications
* adapting to MXNet data structures.
*/
#ifndef TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
#define TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
// #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "deform_conv.h"
#include "cuda.h"
#include "tensorflow/core/util/cuda_kernel_helper.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/logging.h"
#include <algorithm>
#include <cstring>
#include <vector>
namespace tensorflow {
typedef Eigen::GpuDevice GPUDevice;
typedef std::vector<int32> TShape;
// fetch value from bottom_data(1D array), using subscript (h, w)
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType* bottom_data, const int data_width,
const int height, const int width, DType h, DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = (DType)h_low;
}
else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = (DType)w_low;
}
else {
w_high = w_low + 1;
}
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = bottom_data[h_low * data_width + w_low];
DType v2 = bottom_data[h_low * data_width + w_high];
DType v3 = bottom_data[h_high * data_width + w_low];
DType v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const int h, const int w, const int height, const int width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
//empty
return 0;
}
argmax_h = max(argmax_h, (DType)0.0f);
argmax_w = max(argmax_w, (DType)0.0f);
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1)
{
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const int height, const int width, const DType* im_data,
const int data_width, const int bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width)
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
int argmax_h_low = (int)argmax_h;
int argmax_w_low = (int)argmax_w;
int argmax_h_high;
int argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = (DType)argmax_h_low;
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = (DType)argmax_w_low;
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
/*!
* \brief im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(const int n, const DType* data_im, const DType* data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
DType* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int c_im = (index / width_col) / height_col;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const DType map_h = i * dilation_h + offset_h;
const DType map_w = j * dilation_w + offset_w;
const int cur_height = height - h_in;
const int cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
/*!
* \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(const int n, const DType* data_col, const DType* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
DType* grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col) % kernel_w;
const int i = (index / width_col / height_col / kernel_w) % kernel_h;
const int c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!
* \brief DO NOT call this directly. Use wrapper function deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const DType* data_col,
const DType* data_im, const DType* data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int height_col, const int width_col,
DType* grad_offset) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = index / width_col / height_col;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col;
const DType* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const DType* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = ((col_c * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col) % kernel_w;
int i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const DType weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!
* \brief im2col gpu kernel.
* DO NOT call this directly. Use wrapper function im2col() instead;
*/
template <typename DType>
__global__ void im2col_gpu_kernel(const int n, const DType* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
DType* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : static_cast<DType>(0);
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename DType>
__global__ void pureAddToKernel(const int n, DType* result_data, const DType* right_data)
{
CUDA_1D_KERNEL_LOOP(index, n) {
CudaAtomicAdd(result_data+index, right_data[index]);
}
}
template <typename DType>
__global__ void pureSubToKernel(const int n, DType* result_data, const DType* right_data)
{
CUDA_1D_KERNEL_LOOP(index, n) {
CudaAtomicAdd(result_data+index, -right_data[index]);
}
}
template <typename DType>
__global__ void setZeroKernel(const int n, DType* result_data)
{
CUDA_1D_KERNEL_LOOP(index, n) {
*(result_data+index)=DType(0);
}
}
namespace functor {
inline int ProdShape(const TShape &shape, int start);
/*!\brief im2col gpu version
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param col_shape column buffer shape (#channels, output_im_height, output_im_width, ...)
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param data_col column buffer pointer
*/
template <typename DType>
struct deformable_im2col<GPUDevice, DType>{
void operator()(const GPUDevice& d,
const DType* data_im, const DType* data_offset,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride, const TShape& dilation,
const int deformable_group, DType* data_col) {
// num_axes should be smaller than block size
int num_spatial_axes = kernel_shape.size();
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = im_shape[1] * ProdShape(col_shape, 1);
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// num_spatial_axes 2 channel_per_deformable_group 6 num_kernels 72 col_shape 24 3 4 0 im_shape8 6 im_shape4 5 kernel_shape 2 2 pad 0 0
// num_spatial_axes 2 channel_per_deformable_group 6 num_kernels 72 col_shape 24 3 4 0 im_shape8 6 config 1 256
// LOG(INFO) << "num_spatial_axes " << num_spatial_axes << " channel_per_deformable_group " << channel_per_deformable_group
// << " num_kernels " << num_kernels << " col_shape " << col_shape[0]<<" " << col_shape[1] <<" " << col_shape[2]<<" " << col_shape[3]<<" "
// << "im_shape" <<im_shape[0] << " " << im_shape[1] << " \n" << "config "<<config.block_count<<" "<< config.thread_per_block;
// LOG(INFO) <<" im_shape"<<im_shape[2]<<" "<<im_shape[3]<<" kernel_shape "<<kernel_shape[0]<<" "<<kernel_shape[1]<<" pad "<<pad[0]<<" "<<pad[1];
// LOG(INFO) <<" dilation "<<dilation[0]<<" "<<dilation[1];
switch (num_spatial_axes) {
case 2:
deformable_im2col_gpu_kernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
<<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
num_kernels, data_im, data_offset, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], col_shape[2], data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
};
inline int ProdShape(const TShape &shape, int start) {
int64 res = 1;
for(int i=start; i<shape.size(); i++) {
res*=shape[i];
}
return res;
}
/*!\brief
* gpu function of col2im algorithm
* \param s GPUDevice stream
* \param data_col start pointer of the column buffer to be filled
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
 * \param data_im pointer of an image (C, H, W,...) in the image batch
*/
template <typename DType>
struct deformable_col2im<GPUDevice, DType>{
void operator()(const GPUDevice& d,
const DType* data_col, const DType* data_offset,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, const int deformable_group,
DType* grad_im) {
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1);
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0);
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// 6 4 52 2 0 0 2 21 1 6 2 2
// 6 4 52 2 0 0 2 21 1 6 2 2
// LOG(INFO) << im_shape[1]<<' '<<im_shape[2]<<' '<<im_shape[3]<<
// kernel_shape[0]<<' '<<kernel_shape[1]<<' '<<pad[0]<<' '<<pad[1]<<' '<<stride[0]<<' '<<stride[1]<<
// dilation[0]<<' '<<dilation[1]<<' '<<channel_per_deformable_group<<' '<<col_shape[1]<<' '<<col_shape[2];
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_gpu_kernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
num_kernels, data_col, data_offset, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], col_shape[2], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
};
template <typename DType>
struct deformable_col2im_coord<GPUDevice, DType>{
void operator() (const GPUDevice& d,
const DType* data_col, const DType* data_im, const DType* data_offset, const TShape& im_shape,
const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, const int deformable_group, DType* grad_offset) {
size_t num_spatial_axes = kernel_shape.size();
size_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
size_t channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_coord_gpu_kernel<DType><<<config.block_count, config.thread_per_block, 0, d.stream()>>>(
num_kernels, data_col, data_im, data_offset, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], col_shape[2], grad_offset);
// MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes";
}
}
};
/*!\brief im2col gpu version
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param col_shape column buffer shape (#channels, output_im_height, output_im_width, ...)
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param data_col column buffer pointer
*/
template <typename DType>
struct im2col<GPUDevice, DType>{
void operator() (const GPUDevice& d,
const DType* data_im, const TShape& im_shape,
const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, DType* data_col) {
// num_axes should be smaller than block size
int num_spatial_axes = kernel_shape.size();
int num_kernels = im_shape[1] * ProdShape(col_shape, 1);
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
im2col_gpu_kernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
<<<config.block_count, config.thread_per_block, 0, d.stream() >>>(
num_kernels, data_im, im_shape[2], im_shape[3], kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1],
col_shape[1], col_shape[2], data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
};
template <typename DType>
struct pureAddTo<GPUDevice, DType>{
void operator() (const GPUDevice& d, const int n, DType* result_data, const DType* right_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
pureAddToKernel<DType> <<< config.block_count, config.thread_per_block, 0, d.stream() >>>(n, result_data, right_data);
}
};
template <typename DType>
struct pureSubTo<GPUDevice, DType>{
void operator() (const GPUDevice& d, const int n, DType* result_data, const DType* right_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
pureSubToKernel<DType> <<< config.block_count, config.thread_per_block, 0, d.stream() >>>(n, result_data, right_data);
}
};
template <typename DType>
struct setZero<GPUDevice, DType>{
void operator() (const GPUDevice& d, const int n, DType* result_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
setZeroKernel<DType> <<< config.block_count, config.thread_per_block, 0, d.stream() >>>(n, result_data);
}
};
} // namespace functor
#define DECLARE_GPU_SPEC(DType) \
template struct functor::deformable_im2col<GPUDevice, DType>; \
template struct functor::deformable_col2im<GPUDevice, DType>; \
template struct functor::deformable_col2im_coord<GPUDevice, DType>; \
template struct functor::pureAddTo<GPUDevice, DType>; \
template struct functor::pureSubTo<GPUDevice, DType>; \
template struct functor::setZero<GPUDevice, DType>; \
template struct functor::im2col<GPUDevice, DType>;
// extern template struct Copy<GPUDevice, T>;
TF_CALL_float(DECLARE_GPU_SPEC);
TF_CALL_double(DECLARE_GPU_SPEC);
// TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC);
#undef DECLARE_GPU_SPEC
} // namespace tensorflow
// #endif // GOOGLE_CUDA
#endif // TENSORFLOW_KERNELS_CONV_OPS_im2col_gpu_H_
|
f1c29b57cf333ef257d13e7d4ba3ab0536f1219b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include "VectorGPU.hpp"
#include <cmath>
#include <cassert>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "cuda_kernels_vector.hpp"
//Remove Later
#include "../host/Vector.hpp"
const int BLOCKSIZE = 4;
VectorGPU::VectorGPU(const Vector& hostVector)
{
mSize = hostVector.mSize;
checkCudaErrors(hipMalloc(&d_mData, mSize*sizeof(double)));
checkCudaErrors(hipMemcpy( d_mData,
hostVector.mData,
hostVector.mSize*sizeof(double),
hipMemcpyHostToDevice));
}
VectorGPU::VectorGPU(int size, double value)
{
mSize = size;
checkCudaErrors(hipMalloc(&d_mData, mSize*sizeof(double)));
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
hipLaunchKernelGGL(( kernel_fill_vector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData, value);
}
VectorGPU::VectorGPU(int size)
{
mSize = size;
checkCudaErrors(hipMalloc(&d_mData, mSize*sizeof(double)));
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
hipLaunchKernelGGL(( kernel_fill_vector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData, 0.0);
}
//Assignment operator
VectorGPU& VectorGPU::operator=(const VectorGPU& otherVector)
{
assert(mSize == otherVector.mSize);
checkCudaErrors(hipMemcpy ( d_mData,
otherVector.d_mData,
mSize*sizeof(double),
hipMemcpyDeviceToDevice));
return *this;
}
// Destructor
VectorGPU::~VectorGPU()
{
std::cout << "Destructor!!!!!!!!" << std::endl;
checkCudaErrors(hipFree(d_mData));
}
// Return Size method
int VectorGPU::GetSize() const
{
return mSize;
}
// Binary vector addition
VectorGPU VectorGPU::operator+(const VectorGPU& v1) const
{
VectorGPU result(mSize);
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
hipLaunchKernelGGL(( kernel_vector_add) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData, v1.d_mData, result.d_mData);
return result;
}
// Binary vector subtraction
VectorGPU VectorGPU::operator-(const VectorGPU& v1) const
{
VectorGPU result(mSize);
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
hipLaunchKernelGGL(( kernel_vector_substract) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData, v1.d_mData, result.d_mData);
return result;
}
// Scalar multiplication
VectorGPU VectorGPU::operator*(double a) const
{
VectorGPU result(mSize);
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
hipLaunchKernelGGL(( kernel_vector_scalarmul) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData, result.d_mData, a);
return result;
}
// p-norm method
double VectorGPU::Norm(int p) const
{
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
double result = 0.0;
//double* d_result;
//checkCudaErrors(hipMalloc(&d_result, mSize*sizeof(double)));
VectorGPU aux(mSize);
Vector aux_host(mSize);
hipLaunchKernelGGL(( kernel_vector_power) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData, aux.d_mData, p);
hipLaunchKernelGGL(( kernel_sum_reduce_onevector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, aux.d_mData);
//aux.copyToHost(aux_host);
//std::cout << aux_host << std::endl;
//If the grid size is odd, we are left with an odd number of elements at the beginning of the
// array that we need to sum, and the algorithm kernel_sum_reduce_onevector
// only reduces an even number of elements
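// Illustrative example: mSize = 9 with BLOCKSIZE = 4 gives GridSize.x = 9/4 + 1 = 3
// partial sums; writing a zero at index 3 pads them to an even count of 4 before the
// final pairwise reduction below.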
if ( GridSize.x % 2 != 0)
{
// We change the element to 0 and increase the size of the grid by one
// to get an even number of elements
double zero = 0.0;
checkCudaErrors(hipMemcpy(&aux.d_mData[GridSize.x], &zero, sizeof(double), hipMemcpyHostToDevice));
BlockSize = GridSize.x + 1;
GridSize = 1;
hipLaunchKernelGGL(( kernel_sum_reduce_onevector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, aux.d_mData);
}
BlockSize = GridSize;
GridSize = 1;
hipLaunchKernelGGL(( kernel_sum_reduce_onevector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, aux.d_mData);
//aux.copyToHost(aux_host);
//std::cout << aux_host << std::endl;
checkCudaErrors(hipMemcpy( &result, aux.d_mData, sizeof(double), hipMemcpyDeviceToHost));
//hipFree(d_aux);
//hipFree(d_result);
// 1/p power
result = pow(result, 1/(double)p);
return result;
}
double VectorGPU::operator*(const VectorGPU& v1) const
{
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
double result = 0.0;
VectorGPU aux(mSize);
Vector aux_host(mSize);
hipLaunchKernelGGL(( kernel_vector_elementwise_product), dim3(GridSize), dim3(BlockSize), 0, 0, mSize, d_mData,
v1.d_mData, aux.d_mData);
hipLaunchKernelGGL(( kernel_sum_reduce_onevector), dim3(GridSize), dim3(BlockSize), 0, 0, mSize, aux.d_mData);
//aux.copyToHost(aux_host);
//std::cout << aux_host << std::endl;
// If the grid size is odd, we end up with an odd number of partial sums at the
// beginning of the array that still need to be summed, and kernel_sum_reduce_onevector
// can only reduce an even number of elements.
if ( GridSize.x % 2 != 0)
{
// We change the element to 0 and increase the size of the grid by one
// to get an even number of elements
double zero = 0.0;
checkCudaErrors(hipMemcpy(&aux.d_mData[GridSize.x], &zero, sizeof(double), hipMemcpyHostToDevice));
BlockSize = GridSize.x + 1;
GridSize = 1;
hipLaunchKernelGGL(( kernel_sum_reduce_onevector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, aux.d_mData);
}
BlockSize = GridSize;
GridSize = 1;
hipLaunchKernelGGL(( kernel_sum_reduce_onevector) , dim3(GridSize), dim3(BlockSize), 0, 0, mSize, aux.d_mData);
checkCudaErrors(hipMemcpy( &result, aux.d_mData, sizeof(double), hipMemcpyDeviceToHost));
return result;
}
void VectorGPU::copyToHost(Vector& hostVector)
{
checkCudaErrors(hipMemcpy( hostVector.mData,
d_mData,
hostVector.mSize*sizeof(double),
hipMemcpyDeviceToHost));
}
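// Illustrative usage sketch (not part of the original file; assumes the host Vector
// class provides the Vector(int) constructor used above):
// Vector h(8); // host-side vector
// VectorGPU a(h); // copy host data to the device
// VectorGPU b(8, 2.0); // device vector filled with 2.0
// double dot = a * b; // dot product via the reduction kernels
// double nrm = b.Norm(2); // Euclidean norm of b
// VectorGPU c(8);
// c = a + b; // element-wise addition, stored through operator=
// c.copyToHost(h); // bring the result back to the host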
|
f1c29b57cf333ef257d13e7d4ba3ab0536f1219b.cu
|
#include "utils.h"
#include "VectorGPU.hpp"
#include <cmath>
#include <cassert>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cuda_kernels_vector.hpp"
//Remove Later
#include "../host/Vector.hpp"
const int BLOCKSIZE = 4;
VectorGPU::VectorGPU(const Vector& hostVector)
{
mSize = hostVector.mSize;
checkCudaErrors(cudaMalloc(&d_mData, mSize*sizeof(double)));
checkCudaErrors(cudaMemcpy( d_mData,
hostVector.mData,
hostVector.mSize*sizeof(double),
cudaMemcpyHostToDevice));
}
VectorGPU::VectorGPU(int size, double value)
{
mSize = size;
checkCudaErrors(cudaMalloc(&d_mData, mSize*sizeof(double)));
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
kernel_fill_vector <<<GridSize, BlockSize>>>(mSize, d_mData, value);
}
VectorGPU::VectorGPU(int size)
{
mSize = size;
checkCudaErrors(cudaMalloc(&d_mData, mSize*sizeof(double)));
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
kernel_fill_vector <<<GridSize, BlockSize>>>(mSize, d_mData, 0.0);
}
// Assignment operator
VectorGPU& VectorGPU::operator=(const VectorGPU& otherVector)
{
assert(mSize == otherVector.mSize);
checkCudaErrors(cudaMemcpy ( d_mData,
otherVector.d_mData,
mSize*sizeof(double),
cudaMemcpyDeviceToDevice));
return *this;
}
// Destructor
VectorGPU::~VectorGPU()
{
std::cout << "Destructor!!!!!!!!" << std::endl;
checkCudaErrors(cudaFree(d_mData));
}
// Return Size method
int VectorGPU::GetSize() const
{
return mSize;
}
// Binary vector addition
VectorGPU VectorGPU::operator+(const VectorGPU& v1) const
{
VectorGPU result(mSize);
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
kernel_vector_add <<<GridSize, BlockSize>>> (mSize, d_mData, v1.d_mData, result.d_mData);
return result;
}
// Binary vector subtraction
VectorGPU VectorGPU::operator-(const VectorGPU& v1) const
{
VectorGPU result(mSize);
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
kernel_vector_substract <<<GridSize, BlockSize>>> (mSize, d_mData, v1.d_mData, result.d_mData);
return result;
}
// Scalar multiplication
VectorGPU VectorGPU::operator*(double a) const
{
VectorGPU result(mSize);
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
kernel_vector_scalarmul <<<GridSize, BlockSize>>> (mSize, d_mData, result.d_mData, a);
return result;
}
// p-norm method
double VectorGPU::Norm(int p) const
{
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
double result = 0.0;
//double* d_result;
//checkCudaErrors(cudaMalloc(&d_result, mSize*sizeof(double)));
VectorGPU aux(mSize);
Vector aux_host(mSize);
kernel_vector_power <<<GridSize, BlockSize>>> (mSize, d_mData, aux.d_mData, p);
kernel_sum_reduce_onevector <<<GridSize, BlockSize>>> (mSize, aux.d_mData);
//aux.copyToHost(aux_host);
//std::cout << aux_host << std::endl;
// If the grid size is odd, we end up with an odd number of partial sums at the
// beginning of the array that still need to be summed, and kernel_sum_reduce_onevector
// can only reduce an even number of elements.
if ( GridSize.x % 2 != 0)
{
// We change the element to 0 and increase the size of the grid by one
// to get an even number of elements
double zero = 0.0;
checkCudaErrors(cudaMemcpy(&aux.d_mData[GridSize.x], &zero, sizeof(double), cudaMemcpyHostToDevice));
BlockSize = GridSize.x + 1;
GridSize = 1;
kernel_sum_reduce_onevector <<<GridSize, BlockSize>>> (mSize, aux.d_mData);
}
BlockSize = GridSize;
GridSize = 1;
kernel_sum_reduce_onevector <<<GridSize, BlockSize>>> (mSize, aux.d_mData);
//aux.copyToHost(aux_host);
//std::cout << aux_host << std::endl;
checkCudaErrors(cudaMemcpy( &result, aux.d_mData, sizeof(double), cudaMemcpyDeviceToHost));
//cudaFree(d_aux);
//cudaFree(d_result);
// 1/p power
result = pow(result, 1/(double)p);
return result;
}
double VectorGPU::operator*(const VectorGPU& v1) const
{
dim3 BlockSize(BLOCKSIZE);
dim3 GridSize( mSize / BLOCKSIZE +1);
double result = 0.0;
VectorGPU aux(mSize);
Vector aux_host(mSize);
kernel_vector_elementwise_product<<<GridSize, BlockSize>>> ( mSize, d_mData,
v1.d_mData, aux.d_mData);
kernel_sum_reduce_onevector<<<GridSize, BlockSize>>> (mSize, aux.d_mData);
//aux.copyToHost(aux_host);
//std::cout << aux_host << std::endl;
// If the grid size is odd, we end up with an odd number of partial sums at the
// beginning of the array that still need to be summed, and kernel_sum_reduce_onevector
// can only reduce an even number of elements.
if ( GridSize.x % 2 != 0)
{
// We change the element to 0 and increase the size of the grid by one
// to get an even number of elements
double zero = 0.0;
checkCudaErrors(cudaMemcpy(&aux.d_mData[GridSize.x], &zero, sizeof(double), cudaMemcpyHostToDevice));
BlockSize = GridSize.x + 1;
GridSize = 1;
kernel_sum_reduce_onevector <<<GridSize, BlockSize>>> (mSize, aux.d_mData);
}
BlockSize = GridSize;
GridSize = 1;
kernel_sum_reduce_onevector <<<GridSize, BlockSize>>> (mSize, aux.d_mData);
checkCudaErrors(cudaMemcpy( &result, aux.d_mData, sizeof(double), cudaMemcpyDeviceToHost));
return result;
}
void VectorGPU::copyToHost(Vector& hostVector)
{
checkCudaErrors(cudaMemcpy( hostVector.mData,
d_mData,
hostVector.mSize*sizeof(double),
cudaMemcpyDeviceToHost));
}
|
d1a925ae2492b239f7a84aeee1235590e1c16059.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by 孙嘉禾 on 2019/12/31.
//
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __shared__
#define __constant__
#define __global__
// This is slightly mental, but gets it to properly index device function calls like __popc and whatever.
//#define __HIPCC__
// These headers are all implicitly present when you compile CUDA with clang. Clion doesn't know that, so
// we include them explicitly to make the indexer happy. Doing this when you actually build is, obviously,
// a terrible idea :D
//#include <__clang_cuda_builtin_vars.h>
//#include <__clang_cuda_intrinsics.h>
//#include <__clang_cuda_math_forward_declares.h>
//#include <__clang_cuda_complex_builtins.h>
//#include <__clang_cuda_cmath.h>
#endif // __JETBRAINS_IDE__
#include "deformable_conv2d.h"
#include <cstdlib>
#include "tensorflow/core/util/gpu_kernel_helper.h"
#ifdef GOOGLE_CUDA
#include "tensorflow/core/platform/stream_executor.h"
#endif
namespace tensorflow {
typedef Eigen::GpuDevice GPUDevice;
typedef Eigen::ThreadPoolDevice CPUDevice;
Eigen::IndexPair<Eigen::DenseIndex> ContractionDims(bool adj_x, bool adj_y) {
return Eigen::IndexPair<Eigen::DenseIndex>(adj_x ? 0 : 1, adj_y ? 1 : 0);
}
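// For the default adj_x = adj_y = false this yields IndexPair(1, 0): the inner
// dimension of x is contracted with the leading dimension of y, i.e. a plain
// per-batch matrix product in the CPU LaunchBatchMatMul path further below.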
template<typename DType>
__host__ __device__ DType DmcnIm2colBilinear(const DType *bottom_data,
const int data_width,
const int height,
const int width,
DType h,
DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
DType v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
DType v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
DType v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
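// The four coefficients w1..w4 above are the standard bilinear interpolation weights
// and always sum to 1; neighbours that fall outside the feature map contribute 0.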
template<typename DType>
__host__ __device__ DType DmcnGetGradientWeight(DType argmax_h,
DType argmax_w,
const int h,
const int w,
const int height,
const int width) {
/*
 * Bilinear weight of the integer pixel (h, w) with respect to the fractional
 * sample location (argmax_h, argmax_w).
 */
if (argmax_h <= -1 || argmax_w <= -1 || argmax_h >= height || argmax_w >= width) {
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template<typename DType>
__host__ __device__ DType DmcnGetCoordinateWeight(DType argmax_h,
DType argmax_w,
const int height,
const int width,
const DType *im_data,
const int data_width,
const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
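// Note: bp_dir == 0 accumulates the partial derivative of the bilinearly sampled value
// with respect to the fractional h coordinate, and bp_dir == 1 with respect to w; this
// is what the col2im_coord kernels below use to back-propagate into the offsets.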
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void SwapAxisKernel(const int n, const int cuda_mem_size, const int min_unit_size,
DType *input_data, const int dim_num, const int axis_x_dims, const int axis_y_dims,
const int axis_x, const int axis_y) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType *device_data = new DType[cuda_mem_size];
DType *input_data_ptr = input_data + index * cuda_mem_size;
for (int j = 0; j < axis_y_dims; j++) {
for (int i = 0; i < axis_x_dims; i++) {
DType *temp_ptr = input_data_ptr + (i * axis_x_dims + j) * min_unit_size;
DType *device_data_temp_ptr = device_data + (j * axis_y_dims + i) * min_unit_size;
for (int k = 0; k < min_unit_size; k++) {
*(device_data_temp_ptr + k) = *(temp_ptr + k);
}
}
}
for (int i = 0; i < cuda_mem_size; i++) {
*(input_data_ptr + i) = *(device_data + i);
}
delete[]device_data;
}
}
#endif
template<typename DType>
void SwapAxisKernel(const CPUDevice &d, const int n, const int cuda_mem_size, const int min_unit_size,
DType *input_data, const int dim_num, const int axis_x_dims, const int axis_y_dims,
const int axis_x, const int axis_y) {
d.parallelFor(n,
Eigen::TensorOpCost(cuda_mem_size, cuda_mem_size, cuda_mem_size * axis_y_dims * axis_x_dims),
[min_unit_size, input_data, dim_num, axis_x_dims, axis_y_dims,
axis_x, axis_y, cuda_mem_size](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
DType *device_data = new DType[cuda_mem_size];
DType *input_data_ptr = input_data + index * cuda_mem_size;
for (int j = 0; j < axis_y_dims; j++) {
for (int i = 0; i < axis_x_dims; i++) {
DType *temp_ptr = input_data_ptr + (i * axis_x_dims + j) * min_unit_size;
DType *device_data_temp_ptr = device_data + (j * axis_y_dims + i) * min_unit_size;
for (int k = 0; k < min_unit_size; k++) {
*(device_data_temp_ptr + k) = *(temp_ptr + k);
}
}
}
for (int idx = 0; idx < cuda_mem_size; idx++) {
*(input_data_ptr + idx) = *(device_data + idx);
}
delete[] device_data;
}
});
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void DeformableConv2DIm2ColKernel(const int n,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
DType *data_col) {
/*
 * channel_per_deformable_group: number of input channels divided by deformable_group.
 * batch_size here is the im2col step (im2col_step_); deformable_group is usually 1.
 */
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
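// The flat index enumerates (c_im, b_col, h_col, w_col) in row-major order; c_col is
// the first of the kernel_h * kernel_w rows of the column buffer that this input
// channel fills.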
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
#endif
template<typename DType>
void DeformableConv2DIm2ColCPUKernel(const CPUDevice &d,
const int n,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group, // input channels divided by deformable_group
const int batch_size,
const int num_channels,
const int deformable_group, // note: batch_size above is actually im2col_step_; deformable_group is usually 1
const int height_col,
const int width_col,
DType *data_col) {
auto f = [n, data_im, data_offset, data_mask, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_w, stride_h, dilation_w, dilation_h, channel_per_deformable_group,
batch_size, num_channels, deformable_group, height_col, width_col, data_col](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType *data_offset_ptr = data_offset
+ (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col
* width_col; //
const DType *data_mask_ptr = data_mask
+ (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; //
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void DeformableConv2DCol2ImKernel(
const int n,
const DType *data_col, const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr =
data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index] * mask;
const int cur_h = (int) cur_inv_h_data;
const int cur_w = (int) cur_inv_w_data;
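// Scatter the gradient to the integer pixels whose bilinear weight w.r.t.
// (cur_inv_h_data, cur_inv_w_data) is non-zero; the +/-2 window conservatively covers
// those (at most four) neighbours, and atomics are needed because several column
// entries may map to the same input pixel.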
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
#endif
template<typename T>
void MutexAdd(T *address, T val) {
static mutex mu;
std::lock_guard<mutex> lock(mu);
(*address) += val;
}
template<typename DType>
void DeformableConv2DCol2ImCPUKernel(const CPUDevice &d, const int n,
const DType *data_col, const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
auto f = [n, data_col, data_offset, data_mask, channels, height, width, kernel_h,
kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im](int64 start,
int64 end) {
for (int64 index = start; index < end; ++index) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask
+ (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index] * mask;
const int cur_h = (int) cur_inv_h_data;
const int cur_w = (int) cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
MutexAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
// *(grad_im + cur_bottom_grad_pos) += weight * cur_top_grad;
}
}
}
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void DeformableConv2DCol2ImCoordGPUKernel(
const int n,
const DType *data_col, const DType *data_im,
const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width, // C, H, W
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
DType *grad_offset, DType *grad_mask) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr =
data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const DType *data_im_ptr = data_im
+ (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w
* height * width;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr =
data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
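// offset_c indexes the 2 * kernel_h * kernel_w offset channels of this deformable
// group: even channels carry h offsets, odd channels carry w offsets (hence
// bp_dir = offset_c % 2 below), and the mask gradient is written once per kernel
// position when offset_c is even.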
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos]
* DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const DType weight = DmcnGetCoordinateWeight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
grad_offset[index] = val;
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
if (offset_c % 2 == 0) {
grad_mask[
(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col
+ h) * width_col + w] = mval;
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
}
}
}
#endif
template<typename DType>
void DeformableConv2DCol2ImCoordCPUKernel(
const CPUDevice &d,
const int n,
const DType *data_col, const DType *data_im,
const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width, // C, H, W
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
DType *grad_offset, DType *grad_mask) {
auto f = [n, data_col, data_im, data_offset, data_mask, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size, offset_channels, deformable_group,
height_col, width_col, grad_offset, grad_mask](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
DType val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr =
data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const DType *data_im_ptr = data_im
+ (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w
* height * width;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask
+ (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos]
* DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const DType weight = DmcnGetCoordinateWeight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
grad_offset[index] = val;
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
if (offset_c % 2 == 0) {
grad_mask[
(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col
+ h) * width_col + w] = mval;
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
}
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void PureAddToKernel(const int n, DType *result_data, const DType *right_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
CudaAtomicAdd(result_data + index, right_data[index]);
}
}
template<typename DType>
__global__ void SetZeroKernel(const int n, DType *result_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
*(result_data + index) = DType(0);
}
}
template<typename DType>
__global__ void SetOneKernel(const int n, DType *result_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
*(result_data + index) = DType(1);
}
}
#endif
template<typename DType>
__global__ void SetNumAtIndexKernel(DType num, int index, DType *data) {
*(data + index) = num;
}
template<typename DType>
void PureAddToKernel(const CPUDevice &d, const int n, DType *result_data, const DType *right_data) {
auto f = [n, result_data, right_data](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
*(result_data + index) += (right_data[index]);
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
template<typename DType>
void SetZeroKernel(const CPUDevice &d, const int n, DType *result_data) {
auto f = [n, result_data](int64 start, int64 end) {
for (int64 index = start; index < end; ++index) {
*(result_data + index) = DType(0);
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
template<typename DType>
void SetOneKernel(const CPUDevice &d, const int n, DType *result_data) {
auto f = [n, result_data](int64 start, int64 end) {
for (int64 index = start; index < end; ++index) {
*(result_data + index) = DType(1);
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
void DeformableConv2DCol2ImCoord<GPUDevice, DType>::operator()(const Eigen::GpuDevice &d,
const DType *data_col,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_offset,
DType *grad_mask) {
int num_spatial_axes = kernel_shape.size();
int num_kernels =
col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
int channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DeformableConv2DCol2ImCoordGPUKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block),
0, d.stream() ,
num_kernels, data_col, data_im, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], 2 * kernel_shape[0] * kernel_shape[1]
* deformable_group, deformable_group, col_shape[2], col_shape[3],
grad_offset, grad_mask);
// MSHADOW_CUDA_POST_KERNEL_CHECK(DeformableConv2DCol2ImCoordGPUKernel);
break;
default:LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template <typename DType>
void DeformableConv2DCol2Im<GPUDevice, DType>::operator()(
const GPUDevice& d,
const DType* data_col, const DType* data_offset, const DType* data_mask,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, const int32_t deformable_group,
DType* grad_im)
{
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1, im_shape.size());
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0, col_shape.size());
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DeformableConv2DCol2ImKernel<DType>), dim3(config.block_count), dim3(config.thread_per_block),
0, d.stream(),
num_kernels, data_col, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template <typename DType>
void DeformableConv2DIm2Col<GPUDevice, DType>::operator()(
const GPUDevice& d,
const DType* data_im, const DType* data_offset, const DType* data_mask,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride, const TShape& dilation,
const int32_t deformable_group, DType* data_col)
{
int num_spatial_axes = kernel_shape.size();
int channel_per_deformable_group = im_shape[1] / deformable_group; // im_shape[1] = number of input channels
int num_kernels = im_shape[1] * ProdShape(col_shape, 1, col_shape.size()); // K * N / k.Size(), k = filter, col_shape = [K, im2col_step_, H, W]
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DeformableConv2DIm2ColKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), // grid sized to cover num_kernels threads
0, d.stream(),
// Kernel code runs on the device (GPU); device buffers are managed with hipMalloc()/hipFree()/hipMemcpy().
// A launch such as add<<<M, N>>> is issued from the host (CPU) and executed on the device:
// M blocks of N threads each, M * N threads in total.
num_kernels,
data_im,
data_offset,
data_mask,
im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1],
stride[0], stride[1],
dilation[0], dilation[1],
channel_per_deformable_group,
col_shape[1], im_shape[1],
deformable_group,
col_shape[2], col_shape[3],
data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template <typename DType>
void SetZeros<GPUDevice, DType>::operator()(const GPUDevice& d, int n, DType* result_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n ,d);
hipLaunchKernelGGL(( SetZeroKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , n, result_data);
}
template <typename DType>
void PureAddTo<GPUDevice, DType>::operator()(const GPUDevice& d, const int n, DType* result_data, const DType* right_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
hipLaunchKernelGGL(( PureAddToKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , n, result_data, right_data);
}
template <typename DType>
void SetOne<GPUDevice, DType>::operator()(const GPUDevice& d, int n, DType* result_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n ,d);
hipLaunchKernelGGL(( SetOneKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , n, result_data);
}
template <typename DType>
void SetNumAtIndex<GPUDevice, DType>::operator()(const GPUDevice& d, DType num, int index, DType* data){
CudaLaunchConfig config = GetCudaLaunchConfig(1 ,d);
hipLaunchKernelGGL(( SetNumAtIndexKernel<DType>) , dim3(config.block_count), dim3(config.thread_per_block), 0, d.stream() , num, index, data);
}
// Explicit instantiation is required here; otherwise loading the .so fails with: undefined symbol: _ZN10tensorflow13setNumAtIndexIN5Eigen9GpuDeviceEfEclERKS2_fiPf
// I guess the reason for explicitly instantiating the functor structs below is to guarantee a single concrete instance of each functor for every supported type.
template struct DeformableConv2DIm2Col<GPUDevice, double>;
template struct DeformableConv2DCol2Im<GPUDevice, double>;
template struct DeformableConv2DCol2ImCoord<GPUDevice, double>;
template struct PureAddTo<GPUDevice, double>;
template struct SetOne<GPUDevice, double>;
template struct SetZeros<GPUDevice, double>;
template struct SwapAxis<GPUDevice, double>;
template struct SetNumAtIndex<GPUDevice, double>;
template struct DeformableConv2DIm2Col<GPUDevice, float>;
template struct DeformableConv2DCol2Im<GPUDevice, float>;
template struct DeformableConv2DCol2ImCoord<GPUDevice, float>;
template struct PureAddTo<GPUDevice, float>;
template struct SetOne<GPUDevice, float>;
template struct SetZeros<GPUDevice, float>;
template struct SwapAxis<GPUDevice, float>;
template struct SetNumAtIndex<GPUDevice, float>;
#endif
template<typename DType>
void DeformableConv2DCol2ImCoord<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const DType *data_col,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_offset,
DType *grad_mask) {
int num_spatial_axes = kernel_shape.size();
int num_kernels =
col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
int channel_per_deformable_group = col_shape[0] / deformable_group;
switch (num_spatial_axes) {
case 2:
DeformableConv2DCol2ImCoordCPUKernel<DType>(d,
num_kernels,
data_col,
data_im,
data_offset,
data_mask,
im_shape[1],
im_shape[2],
im_shape[3],
kernel_shape[0],
kernel_shape[1],
pad[0],
pad[1],
stride[0],
stride[1],
dilation[0],
dilation[1],
channel_per_deformable_group,
col_shape[1],
2 * kernel_shape[0] * kernel_shape[1] * deformable_group,
deformable_group,
col_shape[2],
col_shape[3],
grad_offset,
grad_mask);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template<typename DType>
void DeformableConv2DCol2Im<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const DType *data_col,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_im) {
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1, im_shape.size());
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0, col_shape.size());
// num_axes should be smaller than block size
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
DeformableConv2DCol2ImCPUKernel<DType>(
d, num_kernels, data_col, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template<typename DType>
void DeformableConv2DIm2Col<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *data_col) {
int num_spatial_axes = kernel_shape.size();
int channel_per_deformable_group = im_shape[1] / deformable_group; // im_shape[1] = number of input channels
int num_kernels = im_shape[1] * ProdShape(col_shape,
1,
col_shape.size()); // K * N / k.Size(), k = filter, col_shape = [K, im2col_step_, H, W]
switch (num_spatial_axes) {
case 2:
DeformableConv2DIm2ColCPUKernel<DType>(
d,
num_kernels,
data_im,
data_offset,
data_mask,
im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1],
stride[0], stride[1],
dilation[0], dilation[1],
channel_per_deformable_group,
col_shape[1], im_shape[1],
deformable_group,
col_shape[2], col_shape[3],
data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template<typename DType>
void SetZeros<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d, int n, DType *result_data) {
SetZeroKernel(d, n, result_data);
}
template<typename DType>
void PureAddTo<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const int n,
DType *result_data,
const DType *right_data) {
PureAddToKernel(d, n, result_data, right_data);
}
template<typename DType>
void SetOne<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d, int n, DType *result_data) {
SetOneKernel(d, n, result_data);
}
template<typename DType>
void SetNumAtIndex<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d, DType num, int index, DType *data) {
*(data + index) = num;
}
#ifdef GOOGLE_CUDA
template <typename T>
se::DeviceMemory<T> AsDeviceMemory(const T* cuda_memory) {
se::DeviceMemoryBase wrapped(const_cast<T*>(cuda_memory));
se::DeviceMemory<T> typed(wrapped);
return typed;
}
class CublasScratchAllocator : public se::ScratchAllocator {
public:
using Stream = se::Stream;
using DeviceMemoryBytes = se::DeviceMemory<uint8>;
CublasScratchAllocator(OpKernelContext* context) : context_(context) {}
int64 GetMemoryLimitInBytes(Stream*) override { return -1; }
se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(int64 byte_size) {
Tensor temporary_memory;
Status allocation_status(context_->allocate_temp(
DT_UINT8, TensorShape({byte_size}), &temporary_memory));
if (!allocation_status.ok()) {
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
}
// Hold the reference of the allocated tensors until the end of the
// allocator.
allocated_tensors_.push_back(temporary_memory);
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(
temporary_memory.flat<uint8>().data(),
temporary_memory.flat<uint8>().size()));
}
se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(
Stream* stream, int64 byte_size) override {
Tensor temporary_memory;
Status allocation_status(context_->allocate_temp(
DT_UINT8, TensorShape({byte_size}), &temporary_memory));
if (!allocation_status.ok()) {
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
}
// Hold the reference of the allocated tensors until the end of the
// allocator.
allocated_tensors_.push_back(temporary_memory);
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(
temporary_memory.flat<uint8>().data(),
temporary_memory.flat<uint8>().size()));
}
private:
OpKernelContext* context_;
std::vector<Tensor> allocated_tensors_;
};
template <typename Scalar>
void LaunchBatchMatMul<GPUDevice, Scalar>::launch(OpKernelContext* context, const TensorShape& in_x_shape, const TensorShape& in_y_shape, const Scalar* in_x_ptr,
const Scalar* in_y_ptr, bool adj_x, bool adj_y, Scalar* out) {
constexpr se::blas::Transpose kTranspose =
is_complex<Scalar>::value ? se::blas::Transpose::kConjugateTranspose
: se::blas::Transpose::kTranspose;
se::blas::Transpose trans[] = {se::blas::Transpose::kNoTranspose,
kTranspose};
const uint64 m = in_x_shape.dim_size(adj_x ? 2 : 1);
const uint64 k = in_x_shape.dim_size(adj_x ? 1 : 2);
const uint64 n = in_y_shape.dim_size(adj_y ? 1 : 2);
const uint64 batch_size = in_x_shape.dim_size(0);
auto blas_transpose_a = trans[adj_x];
auto blas_transpose_b = trans[adj_y];
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
typedef se::DeviceMemory<Scalar> DeviceMemoryType;
std::vector<DeviceMemoryType> a_device_memory;
std::vector<DeviceMemoryType> b_device_memory;
std::vector<DeviceMemoryType> c_device_memory;
std::vector<DeviceMemoryType*> a_ptrs;
std::vector<DeviceMemoryType*> b_ptrs;
std::vector<DeviceMemoryType*> c_ptrs;
a_device_memory.reserve(batch_size);
b_device_memory.reserve(batch_size);
c_device_memory.reserve(batch_size);
a_ptrs.reserve(batch_size);
b_ptrs.reserve(batch_size);
c_ptrs.reserve(batch_size);
auto* a_base_ptr = in_x_ptr;
auto* b_base_ptr = in_y_ptr;
auto* c_base_ptr = out;
for (int64 i = 0; i < batch_size; ++i) {
a_device_memory.push_back(AsDeviceMemory(a_base_ptr + i * m * k));
b_device_memory.push_back(AsDeviceMemory(b_base_ptr + i * k * n));
c_device_memory.push_back(AsDeviceMemory(c_base_ptr + i * m * n));
a_ptrs.push_back(&a_device_memory.back());
b_ptrs.push_back(&b_device_memory.back());
c_ptrs.push_back(&c_device_memory.back());
}
typedef Scalar Coefficient;
// Cublas does
// C = A x B
// where A, B and C are assumed to be in column major.
// We want the output to be in row-major, so we can compute
// C' = B' x A', where ' stands for transpose (not adjoint).
// TODO(yangzihao): Choose the best of the three strategies using autotune.
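// Illustration: with row-major A (m x k) and B (k x n), calling the column-major GEMM
// as gemm(B, A) with dimensions (n, m, k) and leading dimensions n, k, n produces C'
// in column-major form, which is exactly C (m x n) laid out in row-major memory.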
if (batch_size == 1) {
// This is a regular matrix*matrix or matrix*vector multiply. Avoid the
// overhead of the scratch allocator and the batch interface.
if (n == 1 &&
blas_transpose_b != se::blas::Transpose::kConjugateTranspose &&
blas_transpose_a != se::blas::Transpose::kConjugateTranspose) {
// This is a matrix*vector multiply so use GEMV to compute A * b.
// Here we are multiplying in the natural order, so we have to flip
// the transposition flag to compensate for the tensor being stored
// row-major. Since GEMV doesn't provide a way to just conjugate an
// argument, we have to defer those cases to GEMM below.
auto gemv_trans_a = blas_transpose_a == se::blas::Transpose::kTranspose
? se::blas::Transpose::kNoTranspose
: se::blas::Transpose::kTranspose;
bool blas_launch_status =
stream
->ThenBlasGemv(gemv_trans_a, adj_x ? m : k, adj_x ? k : m,
static_cast<Coefficient>(1.0), *(a_ptrs[0]),
adj_x ? m : k, *(b_ptrs[0]), 1,
static_cast<Coefficient>(0.0), c_ptrs[0], 1)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMV launch failed : a.shape=", in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k));
}
} else {
bool blas_launch_status =
stream
->ThenBlasGemm(blas_transpose_b, blas_transpose_a, n, m, k,
static_cast<Coefficient>(1.0), *(b_ptrs[0]),
adj_y ? k : n, *(a_ptrs[0]), adj_x ? m : k,
static_cast<Coefficient>(0.0), c_ptrs[0], n)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMM launch failed : a.shape=", in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k));
}
}
} else {
CublasScratchAllocator scratch_allocator(context);
bool blas_launch_status =
stream
->ThenBlasGemmBatchedWithScratch(
blas_transpose_b, blas_transpose_a, n, m, k,
static_cast<Coefficient>(1.0), b_ptrs, adj_y ? k : n, a_ptrs,
adj_x ? m : k, static_cast<Coefficient>(0.0), c_ptrs, n,
batch_size, &scratch_allocator)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMMBatched launch failed : a.shape=",
in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k, ", batch_size=", batch_size));
}
}
}
#endif
template<typename T>
void LaunchBatchMatMul<CPUDevice, T>::launch(OpKernelContext *context,
const TensorShape &in_x_shape,
const TensorShape &in_y_shape,
const T *in_x_ptr,
const T *in_y_ptr,
bool adj_x,
bool adj_y,
T *out) {
const int64 m = in_x_shape.dim_size(adj_x ? 2 : 1);
const int64 k = in_x_shape.dim_size(adj_x ? 1 : 2);
const int64 n = in_y_shape.dim_size(adj_y ? 1 : 2);
const uint64 batch_size = in_x_shape.dim_size(0);
Eigen::TensorMap<Eigen::Tensor<const T, 3, Eigen::RowMajor>> t_in_x(in_x_ptr, in_x_shape.AsEigenDSizes<3, Eigen::DenseIndex>());
Eigen::TensorMap<Eigen::Tensor<const T, 3, Eigen::RowMajor>> t_in_y(in_y_ptr, in_y_shape.AsEigenDSizes<3, Eigen::DenseIndex>());
Eigen::TensorMap<Eigen::Tensor<T, 3, Eigen::RowMajor>> t_out(out, batch_size, m, n);
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_pairs;
contract_pairs[0] = ContractionDims(adj_x, adj_y);
auto &device = context->eigen_device<CPUDevice>();
for (int i = 0; i < t_out.dimension(0); ++i) {
t_out.template chip<0>(i).device(device) =
(t_in_x.template chip<0>(i)).template contract(t_in_y.template chip<0>(i), contract_pairs);
}
}
template
struct DeformableConv2DIm2Col<CPUDevice, double>;
template
struct DeformableConv2DCol2Im<CPUDevice, double>;
template
struct DeformableConv2DCol2ImCoord<CPUDevice, double>;
template
struct PureAddTo<CPUDevice, double>;
template
struct SetOne<CPUDevice, double>;
template
struct SetZeros<CPUDevice, double>;
template
struct SwapAxis<CPUDevice, double>;
template
struct SetNumAtIndex<CPUDevice, double>;
template
struct DeformableConv2DIm2Col<CPUDevice, float>;
template
struct DeformableConv2DCol2Im<CPUDevice, float>;
template
struct DeformableConv2DCol2ImCoord<CPUDevice, float>;
template
struct PureAddTo<CPUDevice, float>;
template
struct SetOne<CPUDevice, float>;
template
struct SetZeros<CPUDevice, float>;
template
struct SwapAxis<CPUDevice, float>;
template
struct SetNumAtIndex<CPUDevice, float>;
template
struct LaunchBatchMatMul<GPUDevice, float>;
template
struct LaunchBatchMatMul<GPUDevice, double>;
template
struct LaunchBatchMatMul<CPUDevice, float>;
template
struct LaunchBatchMatMul<CPUDevice, double>;
}
|
d1a925ae2492b239f7a84aeee1235590e1c16059.cu
|
//
// Created by 孙嘉禾 on 2019/12/31.
//
#ifdef __JETBRAINS_IDE__
#define __host__
#define __device__
#define __shared__
#define __constant__
#define __global__
// This is slightly mental, but gets it to properly index device function calls like __popc and whatever.
//#define __CUDACC__
// These headers are all implicitly present when you compile CUDA with clang. Clion doesn't know that, so
// we include them explicitly to make the indexer happy. Doing this when you actually build is, obviously,
// a terrible idea :D
//#include <__clang_cuda_builtin_vars.h>
//#include <__clang_cuda_intrinsics.h>
//#include <__clang_cuda_math_forward_declares.h>
//#include <__clang_cuda_complex_builtins.h>
//#include <__clang_cuda_cmath.h>
#endif // __JETBRAINS_IDE__
#include "deformable_conv2d.h"
#include <cstdlib>
#include "tensorflow/core/util/gpu_kernel_helper.h"
#ifdef GOOGLE_CUDA
#include "tensorflow/core/platform/stream_executor.h"
#endif
namespace tensorflow {
typedef Eigen::GpuDevice GPUDevice;
typedef Eigen::ThreadPoolDevice CPUDevice;
Eigen::IndexPair<Eigen::DenseIndex> ContractionDims(bool adj_x, bool adj_y) {
return Eigen::IndexPair<Eigen::DenseIndex>(adj_x ? 0 : 1, adj_y ? 1 : 0);
}
template<typename DType>
__host__ __device__ DType DmcnIm2colBilinear(const DType *bottom_data,
const int data_width,
const int height,
const int width,
DType h,
DType w) {
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
DType v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
DType v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
DType v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template<typename DType>
__host__ __device__ DType DmcnGetGradientWeight(DType argmax_h,
DType argmax_w,
const int h,
const int w,
const int height,
const int width) {
  /*
   * argmax_h / argmax_w: the fractional sampling location produced by the learned offsets;
   * (h, w): the integer pixel whose bilinear gradient weight is returned.
   */
if (argmax_h <= -1 || argmax_w <= -1 || argmax_h >= height || argmax_w >= width) {
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
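// DmcnGetCoordinateWeight returns the partial derivative of the bilinear sample at
// (argmax_h, argmax_w) with respect to the sampling coordinate: bp_dir == 0 differentiates
// with respect to the vertical (h) offset, bp_dir == 1 with respect to the horizontal (w)
// offset. Locations outside the image contribute zero gradient.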
template<typename DType>
__host__ __device__ DType DmcnGetCoordinateWeight(DType argmax_h,
DType argmax_w,
const int height,
const int width,
const DType *im_data,
const int data_width,
const int bp_dir) {
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
DType weight = 0;
if (bp_dir == 0) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
} else if (bp_dir == 1) {
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void SwapAxisKernel(const int n, const int cuda_mem_size, const int min_unit_size,
DType *input_data, const int dim_num, const int axis_x_dims, const int axis_y_dims,
const int axis_x, const int axis_y) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType *device_data = new DType[cuda_mem_size];
DType *input_data_ptr = input_data + index * cuda_mem_size;
for (int j = 0; j < axis_y_dims; j++) {
for (int i = 0; i < axis_x_dims; i++) {
DType *temp_ptr = input_data_ptr + (i * axis_x_dims + j) * min_unit_size;
DType *device_data_temp_ptr = device_data + (j * axis_y_dims + i) * min_unit_size;
for (int k = 0; k < min_unit_size; k++) {
*(device_data_temp_ptr + k) = *(temp_ptr + k);
}
}
}
for (int i = 0; i < cuda_mem_size; i++) {
*(input_data_ptr + i) = *(device_data + i);
}
delete[]device_data;
}
}
#endif
template<typename DType>
void SwapAxisKernel(const CPUDevice &d, const int n, const int cuda_mem_size, const int min_unit_size,
DType *input_data, const int dim_num, const int axis_x_dims, const int axis_y_dims,
const int axis_x, const int axis_y) {
d.parallelFor(n,
Eigen::TensorOpCost(cuda_mem_size, cuda_mem_size, cuda_mem_size * axis_y_dims * axis_x_dims),
[min_unit_size, input_data, dim_num, axis_x_dims, axis_y_dims,
axis_x, axis_y, cuda_mem_size](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
DType *device_data = new DType[cuda_mem_size];
DType *input_data_ptr = input_data + index * cuda_mem_size;
for (int j = 0; j < axis_y_dims; j++) {
for (int i = 0; i < axis_x_dims; i++) {
DType *temp_ptr = input_data_ptr + (i * axis_x_dims + j) * min_unit_size;
DType *device_data_temp_ptr = device_data + (j * axis_y_dims + i) * min_unit_size;
for (int k = 0; k < min_unit_size; k++) {
*(device_data_temp_ptr + k) = *(temp_ptr + k);
}
}
}
for (int idx = 0; idx < cuda_mem_size; idx++) {
*(input_data_ptr + idx) = *(device_data + idx);
}
delete[] device_data;
}
});
}
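// The im2col kernels below write data_col with layout
// [num_channels * kernel_h * kernel_w, batch_size, height_col, width_col]: each thread (or
// loop index) handles one (channel, batch, h_col, w_col) sample position and fills the
// kernel_h * kernel_w entries of its column, scaling every bilinear sample by the learned
// modulation mask.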
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void DeformableConv2DIm2ColKernel(const int n,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
const int channel_per_deformable_group,
const int batch_size,
const int num_channels,
const int deformable_group,
const int height_col,
const int width_col,
DType *data_col) {
  /*
   * channel_per_deformable_group: number of input channels divided by deformable_group.
   * Here batch_size refers to im2col_step_, which is usually set to 1.
   */
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
#endif
template<typename DType>
void DeformableConv2DIm2ColCPUKernel(const CPUDevice &d,
const int n,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h,
const int pad_w,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
                                     const int channel_per_deformable_group, // number of input channels divided by deformable_group
const int batch_size,
const int num_channels,
                                     const int deformable_group, // here batch_size refers to im2col_step_, which is usually 1
const int height_col,
const int width_col,
DType *data_col) {
auto f = [n, data_im, data_offset, data_mask, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_w, stride_h, dilation_w, dilation_h, channel_per_deformable_group,
batch_size, num_channels, deformable_group, height_col, width_col, data_col](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
DType *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const DType *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const DType *data_offset_ptr = data_offset
+ (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col
* width_col; //
const DType *data_mask_ptr = data_mask
+ (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; //
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
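// The col2im kernels below scatter each column entry back to the input image: they scan a
// 5x5 window around the integer part of the sampling location and add the bilinear gradient
// weight to the (at most four) pixels within unit distance. Several column entries can map
// to the same input pixel, so the accumulation into grad_im must be serialized
// (CudaAtomicAdd on the GPU, MutexAdd in the CPU fallback).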
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void DeformableConv2DCol2ImKernel(
const int n,
const DType *data_col, const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr =
data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index] * mask;
const int cur_h = (int) cur_inv_h_data;
const int cur_w = (int) cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
#endif
template<typename T>
void MutexAdd(T *address, T val) {
static mutex mu;
std::lock_guard<mutex> lock(mu);
(*address) += val;
}
template<typename DType>
void DeformableConv2DCol2ImCPUKernel(const CPUDevice &d, const int n,
const DType *data_col, const DType *data_offset, const DType *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
DType *grad_im) {
auto f = [n, data_col, data_offset, data_mask, channels, height, width, kernel_h,
kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im](int64 start,
int64 end) {
for (int64 index = start; index < end; ++index) {
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask
+ (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index] * mask;
const int cur_h = (int) cur_inv_h_data;
const int cur_w = (int) cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
DType weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
MutexAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
// *(grad_im + cur_bottom_grad_pos) += weight * cur_top_grad;
}
}
}
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void DeformableConv2DCol2ImCoordGPUKernel(
const int n,
const DType *data_col, const DType *data_im,
const DType *data_offset, const DType *data_mask,
    const int channels, const int height, const int width, // input C, H, W
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
DType *grad_offset, DType *grad_mask) {
CUDA_1D_KERNEL_LOOP(index, n) {
DType val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr =
data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const DType *data_im_ptr = data_im
+ (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w
* height * width;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr =
data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos]
* DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const DType weight = DmcnGetCoordinateWeight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
grad_offset[index] = val;
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
if (offset_c % 2 == 0) {
grad_mask[
(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col
+ h) * width_col + w] = mval;
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
}
}
}
#endif
template<typename DType>
void DeformableConv2DCol2ImCoordCPUKernel(
const CPUDevice &d,
const int n,
const DType *data_col, const DType *data_im,
const DType *data_offset, const DType *data_mask,
    const int channels, const int height, const int width, // input C, H, W
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
DType *grad_offset, DType *grad_mask) {
auto f = [n, data_col, data_im, data_offset, data_mask, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, batch_size, offset_channels, deformable_group,
height_col, width_col, grad_offset, grad_mask](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
DType val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const DType *data_col_ptr =
data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const DType *data_im_ptr = data_im
+ (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w
* height * width;
const DType *data_offset_ptr = data_offset
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const DType *data_mask_ptr = data_mask
+ (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType mask = data_mask_ptr[data_mask_hw_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos]
* DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const DType weight = DmcnGetCoordinateWeight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
grad_offset[index] = val;
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
if (offset_c % 2 == 0) {
grad_mask[
(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col
+ h) * width_col + w] = mval;
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
}
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
__global__ void PureAddToKernel(const int n, DType *result_data, const DType *right_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
CudaAtomicAdd(result_data + index, right_data[index]);
}
}
template<typename DType>
__global__ void SetZeroKernel(const int n, DType *result_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
*(result_data + index) = DType(0);
}
}
template<typename DType>
__global__ void SetOneKernel(const int n, DType *result_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
*(result_data + index) = DType(1);
}
}
#endif
template<typename DType>
__global__ void SetNumAtIndexKernel(DType num, int index, DType *data) {
*(data + index) = num;
}
template<typename DType>
void PureAddToKernel(const CPUDevice &d, const int n, DType *result_data, const DType *right_data) {
auto f = [n, result_data, right_data](int64 start, int64 end) {
for (int64 index = start; index < end; index++) {
*(result_data + index) += (right_data[index]);
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
template<typename DType>
void SetZeroKernel(const CPUDevice &d, const int n, DType *result_data) {
auto f = [n, result_data](int64 start, int64 end) {
for (int64 index = start; index < end; ++index) {
*(result_data + index) = DType(0);
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
template<typename DType>
void SetOneKernel(const CPUDevice &d, const int n, DType *result_data) {
auto f = [n, result_data](int64 start, int64 end) {
for (int64 index = start; index < end; ++index) {
*(result_data + index) = DType(1);
}
};
d.parallelFor(n, Eigen::TensorOpCost(n, n, n), f);
}
#ifdef GOOGLE_CUDA
template<typename DType>
void DeformableConv2DCol2ImCoord<GPUDevice, DType>::operator()(const Eigen::GpuDevice &d,
const DType *data_col,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_offset,
DType *grad_mask) {
int num_spatial_axes = kernel_shape.size();
int num_kernels =
col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
int channel_per_deformable_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
DeformableConv2DCol2ImCoordGPUKernel<DType> <<< config.block_count, config.thread_per_block,
0, d.stream() >>> (
num_kernels, data_col, data_im, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], 2 * kernel_shape[0] * kernel_shape[1]
* deformable_group, deformable_group, col_shape[2], col_shape[3],
grad_offset, grad_mask);
// MSHADOW_CUDA_POST_KERNEL_CHECK(DeformableConv2DCol2ImCoordGPUKernel);
break;
default:LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template <typename DType>
void DeformableConv2DCol2Im<GPUDevice, DType>::operator()(
const GPUDevice& d,
const DType* data_col, const DType* data_offset, const DType* data_mask,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride,
const TShape& dilation, const int32_t deformable_group,
DType* grad_im)
{
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1, im_shape.size());
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0, col_shape.size());
// num_axes should be smaller than block size
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
DeformableConv2DCol2ImKernel<DType><<<config.block_count, config.thread_per_block,
0, d.stream()>>>(
num_kernels, data_col, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template <typename DType>
void DeformableConv2DIm2Col<GPUDevice, DType>::operator()(
const GPUDevice& d,
const DType* data_im, const DType* data_offset, const DType* data_mask,
const TShape& im_shape, const TShape& col_shape, const TShape& kernel_shape,
const TShape& pad, const TShape& stride, const TShape& dilation,
const int32_t deformable_group, DType* data_col)
{
int num_spatial_axes = kernel_shape.size();
  int channel_per_deformable_group = im_shape[1] / deformable_group; // im_shape[1] = number of input channels
int num_kernels = im_shape[1] * ProdShape(col_shape, 1, col_shape.size()); // K * N / k.Size(), k = filter, col_shape = [K, im2col_step_, H, W]
CudaLaunchConfig config = GetCudaLaunchConfig(num_kernels, d);
CHECK_LT(num_spatial_axes, config.thread_per_block);
switch (num_spatial_axes) {
case 2:
DeformableConv2DIm2ColKernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
      <<<config.block_count, config.thread_per_block, // the grid is sized so the launch covers all num_kernels work items
          0, d.stream()>>>(
          // CUDA manages device (GPU) memory through cudaMalloc(), cudaFree() and cudaMemcpy().
          // A kernel call such as add<<<M, N>>>(...) is host (CPU) code launching device code with
          // M blocks of N threads each, i.e. M * N threads in total; a sketch of the equivalent
          // grid-stride loop follows this function.
num_kernels,
data_im,
data_offset,
data_mask,
im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1],
stride[0], stride[1],
dilation[0], dilation[1],
channel_per_deformable_group,
col_shape[1], im_shape[1],
deformable_group,
col_shape[2], col_shape[3],
data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
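// Rough sketch (illustrative, not used by this file): CUDA_1D_KERNEL_LOOP(index, n) in the
// kernels above expands to a grid-stride loop, approximately
//
//   for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
//        index += blockDim.x * gridDim.x) { /* body */ }
//
// so all n work items are processed even when
// config.block_count * config.thread_per_block < n.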
template <typename DType>
void SetZeros<GPUDevice, DType>::operator()(const GPUDevice& d, int n, DType* result_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n ,d);
SetZeroKernel<DType> <<< config.block_count, config.thread_per_block, 0, d.stream() >>>(n, result_data);
}
template <typename DType>
void PureAddTo<GPUDevice, DType>::operator()(const GPUDevice& d, const int n, DType* result_data, const DType* right_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n, d);
PureAddToKernel<DType> <<< config.block_count, config.thread_per_block, 0, d.stream() >>>(n, result_data, right_data);
}
template <typename DType>
void SetOne<GPUDevice, DType>::operator()(const GPUDevice& d, int n, DType* result_data){
CudaLaunchConfig config = GetCudaLaunchConfig(n ,d);
SetOneKernel<DType> <<< config.block_count, config.thread_per_block, 0, d.stream() >>>(n, result_data);
}
template <typename DType>
void SetNumAtIndex<GPUDevice, DType>::operator()(const GPUDevice& d, DType num, int index, DType* data){
CudaLaunchConfig config = GetCudaLaunchConfig(1 ,d);
SetNumAtIndexKernel<DType> <<<config.block_count, config.thread_per_block, 0, d.stream() >>>(num, index, data);
}
// Without the explicit instantiations here, loading the generated .so fails with errors such as
// "undefined symbol: _ZN10tensorflow13setNumAtIndexIN5Eigen9GpuDeviceEfEclERKS2_fiPf".
// Instantiating the functor templates below guarantees that a definition of each required
// specialization is emitted into this translation unit.
template struct DeformableConv2DIm2Col<GPUDevice, double>;
template struct DeformableConv2DCol2Im<GPUDevice, double>;
template struct DeformableConv2DCol2ImCoord<GPUDevice, double>;
template struct PureAddTo<GPUDevice, double>;
template struct SetOne<GPUDevice, double>;
template struct SetZeros<GPUDevice, double>;
template struct SwapAxis<GPUDevice, double>;
template struct SetNumAtIndex<GPUDevice, double>;
template struct DeformableConv2DIm2Col<GPUDevice, float>;
template struct DeformableConv2DCol2Im<GPUDevice, float>;
template struct DeformableConv2DCol2ImCoord<GPUDevice, float>;
template struct PureAddTo<GPUDevice, float>;
template struct SetOne<GPUDevice, float>;
template struct SetZeros<GPUDevice, float>;
template struct SwapAxis<GPUDevice, float>;
template struct SetNumAtIndex<GPUDevice, float>;
#endif
template<typename DType>
void DeformableConv2DCol2ImCoord<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const DType *data_col,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_offset,
DType *grad_mask) {
int num_spatial_axes = kernel_shape.size();
int num_kernels =
col_shape[1] * col_shape[2] * col_shape[3] * 2 * kernel_shape[0] * kernel_shape[1] * deformable_group;
int channel_per_deformable_group = col_shape[0] / deformable_group;
switch (num_spatial_axes) {
case 2:
DeformableConv2DCol2ImCoordCPUKernel<DType>(d,
num_kernels,
data_col,
data_im,
data_offset,
data_mask,
im_shape[1],
im_shape[2],
im_shape[3],
kernel_shape[0],
kernel_shape[1],
pad[0],
pad[1],
stride[0],
stride[1],
dilation[0],
dilation[1],
channel_per_deformable_group,
col_shape[1],
2 * kernel_shape[0] * kernel_shape[1] * deformable_group,
deformable_group,
col_shape[2],
col_shape[3],
grad_offset,
grad_mask);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << "spatial axes";
}
}
template<typename DType>
void DeformableConv2DCol2Im<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const DType *data_col,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *grad_im) {
int num_spatial_axes = kernel_shape.size();
int im_size = ProdShape(im_shape, 1, im_shape.size());
int channel_per_deformable_group = im_shape[1] / deformable_group;
int num_kernels = ProdShape(col_shape, 0, col_shape.size());
// num_axes should be smaller than block size
// using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
DeformableConv2DCol2ImCPUKernel<DType>(
d, num_kernels, data_col, data_offset, data_mask, im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group,
col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template<typename DType>
void DeformableConv2DIm2Col<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const DType *data_im,
const DType *data_offset,
const DType *data_mask,
const TShape &im_shape,
const TShape &col_shape,
const TShape &kernel_shape,
const TShape &pad,
const TShape &stride,
const TShape &dilation,
const int32_t deformable_group,
DType *data_col) {
int num_spatial_axes = kernel_shape.size();
  int channel_per_deformable_group = im_shape[1] / deformable_group; // im_shape[1] = number of input channels
int num_kernels = im_shape[1] * ProdShape(col_shape,
1,
col_shape.size()); // K * N / k.Size(), k = filter, col_shape = [K, im2col_step_, H, W]
switch (num_spatial_axes) {
case 2:
DeformableConv2DIm2ColCPUKernel<DType>(
d,
num_kernels,
data_im,
data_offset,
data_mask,
im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1],
stride[0], stride[1],
dilation[0], dilation[1],
channel_per_deformable_group,
col_shape[1], im_shape[1],
deformable_group,
col_shape[2], col_shape[3],
data_col);
// MSHADOW_CUDA_POST_KERNEL_CHECK(modulated_deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
template<typename DType>
void SetZeros<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d, int n, DType *result_data) {
SetZeroKernel(d, n, result_data);
}
template<typename DType>
void PureAddTo<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d,
const int n,
DType *result_data,
const DType *right_data) {
PureAddToKernel(d, n, result_data, right_data);
}
template<typename DType>
void SetOne<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d, int n, DType *result_data) {
SetOneKernel(d, n, result_data);
}
template<typename DType>
void SetNumAtIndex<CPUDevice, DType>::operator()(const Eigen::ThreadPoolDevice &d, DType num, int index, DType *data) {
*(data + index) = num;
}
#ifdef GOOGLE_CUDA
template <typename T>
se::DeviceMemory<T> AsDeviceMemory(const T* cuda_memory) {
se::DeviceMemoryBase wrapped(const_cast<T*>(cuda_memory));
se::DeviceMemory<T> typed(wrapped);
return typed;
}
class CublasScratchAllocator : public se::ScratchAllocator {
public:
using Stream = se::Stream;
using DeviceMemoryBytes = se::DeviceMemory<uint8>;
CublasScratchAllocator(OpKernelContext* context) : context_(context) {}
int64 GetMemoryLimitInBytes(Stream*) override { return -1; }
se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(int64 byte_size) {
Tensor temporary_memory;
Status allocation_status(context_->allocate_temp(
DT_UINT8, TensorShape({byte_size}), &temporary_memory));
if (!allocation_status.ok()) {
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
}
// Hold the reference of the allocated tensors until the end of the
// allocator.
allocated_tensors_.push_back(temporary_memory);
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(
temporary_memory.flat<uint8>().data(),
temporary_memory.flat<uint8>().size()));
}
se::port::StatusOr<DeviceMemoryBytes> AllocateBytes(
Stream* stream, int64 byte_size) override {
Tensor temporary_memory;
Status allocation_status(context_->allocate_temp(
DT_UINT8, TensorShape({byte_size}), &temporary_memory));
if (!allocation_status.ok()) {
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(nullptr, 0));
}
// Hold the reference of the allocated tensors until the end of the
// allocator.
allocated_tensors_.push_back(temporary_memory);
return se::port::StatusOr<DeviceMemoryBytes>(
DeviceMemoryBytes::MakeFromByteSize(
temporary_memory.flat<uint8>().data(),
temporary_memory.flat<uint8>().size()));
}
private:
OpKernelContext* context_;
std::vector<Tensor> allocated_tensors_;
};
template <typename Scalar>
void LaunchBatchMatMul<GPUDevice, Scalar>::launch(OpKernelContext* context, const TensorShape& in_x_shape, const TensorShape& in_y_shape, const Scalar* in_x_ptr,
const Scalar* in_y_ptr, bool adj_x, bool adj_y, Scalar* out) {
constexpr se::blas::Transpose kTranspose =
is_complex<Scalar>::value ? se::blas::Transpose::kConjugateTranspose
: se::blas::Transpose::kTranspose;
se::blas::Transpose trans[] = {se::blas::Transpose::kNoTranspose,
kTranspose};
const uint64 m = in_x_shape.dim_size(adj_x ? 2 : 1);
const uint64 k = in_x_shape.dim_size(adj_x ? 1 : 2);
const uint64 n = in_y_shape.dim_size(adj_y ? 1 : 2);
const uint64 batch_size = in_x_shape.dim_size(0);
auto blas_transpose_a = trans[adj_x];
auto blas_transpose_b = trans[adj_y];
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available."));
typedef se::DeviceMemory<Scalar> DeviceMemoryType;
std::vector<DeviceMemoryType> a_device_memory;
std::vector<DeviceMemoryType> b_device_memory;
std::vector<DeviceMemoryType> c_device_memory;
std::vector<DeviceMemoryType*> a_ptrs;
std::vector<DeviceMemoryType*> b_ptrs;
std::vector<DeviceMemoryType*> c_ptrs;
a_device_memory.reserve(batch_size);
b_device_memory.reserve(batch_size);
c_device_memory.reserve(batch_size);
a_ptrs.reserve(batch_size);
b_ptrs.reserve(batch_size);
c_ptrs.reserve(batch_size);
auto* a_base_ptr = in_x_ptr;
auto* b_base_ptr = in_y_ptr;
auto* c_base_ptr = out;
for (int64 i = 0; i < batch_size; ++i) {
a_device_memory.push_back(AsDeviceMemory(a_base_ptr + i * m * k));
b_device_memory.push_back(AsDeviceMemory(b_base_ptr + i * k * n));
c_device_memory.push_back(AsDeviceMemory(c_base_ptr + i * m * n));
a_ptrs.push_back(&a_device_memory.back());
b_ptrs.push_back(&b_device_memory.back());
c_ptrs.push_back(&c_device_memory.back());
}
typedef Scalar Coefficient;
// Cublas does
// C = A x B
// where A, B and C are assumed to be in column major.
// We want the output to be in row-major, so we can compute
// C' = B' x A', where ' stands for transpose (not adjoint).
// TODO(yangzihao): Choose the best of the three strategies using autotune.
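  // For example: with row-major A of shape m x k and B of shape k x n, the GEMM below is
  // issued as the n x m column-major product B' x A'; reading that column-major result as
  // row-major yields the desired m x n output C without an explicit transpose.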
if (batch_size == 1) {
// This is a regular matrix*matrix or matrix*vector multiply. Avoid the
// overhead of the scratch allocator and the batch interface.
if (n == 1 &&
blas_transpose_b != se::blas::Transpose::kConjugateTranspose &&
blas_transpose_a != se::blas::Transpose::kConjugateTranspose) {
// This is a matrix*vector multiply so use GEMV to compute A * b.
// Here we are multiplying in the natural order, so we have to flip
// the transposition flag to compensate for the tensor being stored
// row-major. Since GEMV doesn't provide a way to just conjugate an
// argument, we have to defer those cases to GEMM below.
auto gemv_trans_a = blas_transpose_a == se::blas::Transpose::kTranspose
? se::blas::Transpose::kNoTranspose
: se::blas::Transpose::kTranspose;
bool blas_launch_status =
stream
->ThenBlasGemv(gemv_trans_a, adj_x ? m : k, adj_x ? k : m,
static_cast<Coefficient>(1.0), *(a_ptrs[0]),
adj_x ? m : k, *(b_ptrs[0]), 1,
static_cast<Coefficient>(0.0), c_ptrs[0], 1)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMV launch failed : a.shape=", in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k));
}
} else {
bool blas_launch_status =
stream
->ThenBlasGemm(blas_transpose_b, blas_transpose_a, n, m, k,
static_cast<Coefficient>(1.0), *(b_ptrs[0]),
adj_y ? k : n, *(a_ptrs[0]), adj_x ? m : k,
static_cast<Coefficient>(0.0), c_ptrs[0], n)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMM launch failed : a.shape=", in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k));
}
}
} else {
CublasScratchAllocator scratch_allocator(context);
bool blas_launch_status =
stream
->ThenBlasGemmBatchedWithScratch(
blas_transpose_b, blas_transpose_a, n, m, k,
static_cast<Coefficient>(1.0), b_ptrs, adj_y ? k : n, a_ptrs,
adj_x ? m : k, static_cast<Coefficient>(0.0), c_ptrs, n,
batch_size, &scratch_allocator)
.ok();
if (!blas_launch_status) {
context->SetStatus(errors::Internal(
"Blas xGEMMBatched launch failed : a.shape=",
in_x_shape.DebugString(),
", b.shape=", in_y_shape.DebugString(), ", m=", m, ", n=", n,
", k=", k, ", batch_size=", batch_size));
}
}
}
#endif
template<typename T>
void LaunchBatchMatMul<CPUDevice, T>::launch(OpKernelContext *context,
const TensorShape &in_x_shape,
const TensorShape &in_y_shape,
const T *in_x_ptr,
const T *in_y_ptr,
bool adj_x,
bool adj_y,
T *out) {
const int64 m = in_x_shape.dim_size(adj_x ? 2 : 1);
const int64 k = in_x_shape.dim_size(adj_x ? 1 : 2);
const int64 n = in_y_shape.dim_size(adj_y ? 1 : 2);
const uint64 batch_size = in_x_shape.dim_size(0);
Eigen::TensorMap<Eigen::Tensor<const T, 3, Eigen::RowMajor>> t_in_x(in_x_ptr, in_x_shape.AsEigenDSizes<3, Eigen::DenseIndex>());
Eigen::TensorMap<Eigen::Tensor<const T, 3, Eigen::RowMajor>> t_in_y(in_y_ptr, in_y_shape.AsEigenDSizes<3, Eigen::DenseIndex>());
Eigen::TensorMap<Eigen::Tensor<T, 3, Eigen::RowMajor>> t_out(out, batch_size, m, n);
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_pairs;
contract_pairs[0] = ContractionDims(adj_x, adj_y);
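  // With adj_x = adj_y = false the pair above is (1, 0): the k axis of each x slice is
  // contracted against the k axis of each y slice, i.e. a standard (m, k) x (k, n) product
  // per batch entry.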
auto &device = context->eigen_device<CPUDevice>();
for (int i = 0; i < t_out.dimension(0); ++i) {
t_out.template chip<0>(i).device(device) =
(t_in_x.template chip<0>(i)).template contract(t_in_y.template chip<0>(i), contract_pairs);
}
}
template
struct DeformableConv2DIm2Col<CPUDevice, double>;
template
struct DeformableConv2DCol2Im<CPUDevice, double>;
template
struct DeformableConv2DCol2ImCoord<CPUDevice, double>;
template
struct PureAddTo<CPUDevice, double>;
template
struct SetOne<CPUDevice, double>;
template
struct SetZeros<CPUDevice, double>;
template
struct SwapAxis<CPUDevice, double>;
template
struct SetNumAtIndex<CPUDevice, double>;
template
struct DeformableConv2DIm2Col<CPUDevice, float>;
template
struct DeformableConv2DCol2Im<CPUDevice, float>;
template
struct DeformableConv2DCol2ImCoord<CPUDevice, float>;
template
struct PureAddTo<CPUDevice, float>;
template
struct SetOne<CPUDevice, float>;
template
struct SetZeros<CPUDevice, float>;
template
struct SwapAxis<CPUDevice, float>;
template
struct SetNumAtIndex<CPUDevice, float>;
template
struct LaunchBatchMatMul<GPUDevice, float>;
template
struct LaunchBatchMatMul<GPUDevice, double>;
template
struct LaunchBatchMatMul<CPUDevice, float>;
template
struct LaunchBatchMatMul<CPUDevice, double>;
}
|
fdbd935520332491bd2412c345aa2708122e46cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 512
#define M 512
#define CHANNELS 3
void colorTogrey(int *, int *,int,int,int);
// we have 3 channels corresponding to RGB
// The input image is encoded as unsigned characters [0, 255]
__global__
void colorToGreyscaleConvertion(int *Pin_d, int *Pout_d,
int width, int height) {
int Col = threadIdx.x + blockIdx.x * blockDim.x;
int Row = threadIdx.y + blockIdx.y * blockDim.y;
if (Col < (width) && Row < height) {
// get 1D coordinate for the grayscale image
int greyOffset = Row*width + Col;
// one can think of the RGB image having
// CHANNEL times columns of the gray scale image
int rgbOffset = greyOffset*3;
unsigned int r = Pin_d[rgbOffset ]; // red value for pixel
unsigned int g = Pin_d[rgbOffset + 1]; // green value for pixel
unsigned int b = Pin_d[rgbOffset + 2]; // blue value for pixel
// perform the rescaling and store it
// We multiply by floating point constants
Pout_d[greyOffset] = 0.21f*r + 0.72f*g + 0.07f*b;
}
}
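// Note: the 0.21/0.72/0.07 weights above approximate the Rec. 709 luma coefficients
// (0.2126, 0.7152, 0.0722).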
int main()
{
int n=N;int m=M; int c=CHANNELS;
int *Pin_h = (int*) malloc( sizeof(int)*n*m*c);
int ind=0;
int tmp; // int to match the %d conversion used with fscanf
FILE *fp;
fp=fopen("test_image_lena.txt","r");
while (fscanf(fp,"%d",&tmp) == 1){ // stop once no more integers can be read (a feof-based loop runs one extra iteration)
Pin_h[ind]=tmp;
ind=ind+1;
}
fclose(fp);
int *Pout_h = (int*) malloc( sizeof(int)*n*m);
colorTogrey ( Pin_h, Pout_h, n, m, c);
FILE *fp3;
fp3=fopen("testImage_Results_lena.txt","w");
for (int i=0; i < m; i++){
for (int j=0; j < n; j++){
fprintf(fp3,"%4d ",Pout_h[i*n+j]);}
fprintf(fp3,"\n");
}
fclose(fp3);
// free the memory we allocated on the CPU
free( Pin_h);
free( Pout_h );
return 0;
}
void colorTogrey(int *Pin_h, int *Pout_h, int n, int m, int c)
{
int size_in = (n *m* c*sizeof(int)); int size_out = (n*m*sizeof(int));
int *Pin_d; int *Pout_d;
// Transfer Pin_h to device memory
hipMalloc((void **) &Pin_d, size_in);
hipMemcpy(Pin_d, Pin_h, size_in, hipMemcpyHostToDevice);
// Allocate device memory for Pout_d
hipMalloc((void **) &Pout_d, size_out);
dim3 dimGrid((n + 15) / 16, (m + 15) / 16, 1); // integer ceiling; ceil(n/16) truncates before rounding when n is not a multiple of 16
dim3 dimBlock(16,16,1);
hipLaunchKernelGGL((colorToGreyscaleConvertion), dim3(dimGrid), dim3(dimBlock), 0, 0, Pin_d, Pout_d, n, m);
// Transfer Pout_d from device to host
hipMemcpy(Pout_h, Pout_d, size_out, hipMemcpyDeviceToHost);
// Free device memory for Pin_d and Pout_d
hipFree(Pin_d); hipFree(Pout_d);
}
|
fdbd935520332491bd2412c345aa2708122e46cb.cu
|
#include <cuda.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 512
#define M 512
#define CHANNELS 3
void colorTogrey(int *, int *,int,int,int);
// we have 3 channels corresponding to RGB
// The input image is encoded as unsigned characters [0, 255]
__global__
void colorToGreyscaleConvertion(int *Pin_d, int *Pout_d,
int width, int height) {
int Col = threadIdx.x + blockIdx.x * blockDim.x;
int Row = threadIdx.y + blockIdx.y * blockDim.y;
if (Col < (width) && Row < height) {
// get 1D coordinate for the grayscale image
int greyOffset = Row*width + Col;
// one can think of the RGB image having
// CHANNEL times columns of the gray scale image
int rgbOffset = greyOffset*3;
unsigned int r = Pin_d[rgbOffset ]; // red value for pixel
unsigned int g = Pin_d[rgbOffset + 1]; // green value for pixel
unsigned int b = Pin_d[rgbOffset + 2]; // blue value for pixel
// perform the rescaling and store it
// We multiply by floating point constants
Pout_d[greyOffset] = 0.21f*r + 0.72f*g + 0.07f*b;
}
}
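// Note: the 0.21/0.72/0.07 weights above approximate the Rec. 709 luma coefficients
// (0.2126, 0.7152, 0.0722).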
int main()
{
int n=N;int m=M; int c=CHANNELS;
int *Pin_h = (int*) malloc( sizeof(int)*n*m*c);
int ind=0;
int tmp; // int to match the %d conversion used with fscanf
FILE *fp;
fp=fopen("test_image_lena.txt","r");
while (fscanf(fp,"%d",&tmp) == 1){ // stop once no more integers can be read (a feof-based loop runs one extra iteration)
Pin_h[ind]=tmp;
ind=ind+1;
}
fclose(fp);
int *Pout_h = (int*) malloc( sizeof(int)*n*m);
colorTogrey ( Pin_h, Pout_h, n, m, c);
FILE *fp3;
fp3=fopen("testImage_Results_lena.txt","w");
for (int i=0; i < m; i++){
for (int j=0; j < n; j++){
fprintf(fp3,"%4d ",Pout_h[i*n+j]);}
fprintf(fp3,"\n");
}
fclose(fp3);
// free the memory we allocated on the CPU
free( Pin_h);
free( Pout_h );
return 0;
}
void colorTogrey(int *Pin_h, int *Pout_h, int n, int m, int c)
{
int size_in = (n *m* c*sizeof(int)); int size_out = (n*m*sizeof(int));
int *Pin_d; int *Pout_d;
// Transfer Pin_h to device memory
cudaMalloc((void **) &Pin_d, size_in);
cudaMemcpy(Pin_d, Pin_h, size_in, cudaMemcpyHostToDevice);
// Allocate device memory for Pout_d
cudaMalloc((void **) &Pout_d, size_out);
dim3 dimGrid((n + 15) / 16, (m + 15) / 16, 1); // integer ceiling; ceil(n/16) truncates before rounding when n is not a multiple of 16
dim3 dimBlock(16,16,1);
colorToGreyscaleConvertion<<<dimGrid,dimBlock>>>(Pin_d, Pout_d, n, m);
// Transfer Pout_d from device to host
cudaMemcpy(Pout_h, Pout_d, size_out, cudaMemcpyDeviceToHost);
// Free device memory for Pin_d and Pout_d
cudaFree(Pin_d); cudaFree(Pout_d);
}
|
d33095682738c0418ef94c224126b76355b86857.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// =============================================================================
// Auxiliary routine to compute piv final destination for the current step
/******************************************************************************/
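// The device function below applies the swap sequence described by ipiv (swap entry i with
// entry ipiv[i]-1, for i = 0 .. nb-1) to the identity permutation {1, ..., m} stored in
// pivinfo. Worked example (illustrative): m = 3, nb = 2, ipiv = {3, 3} (1-based) gives
// pivinfo = {1, 2, 3} -> {3, 2, 1} after i = 0 -> {3, 1, 2} after i = 1, i.e. entry j then
// names the original row that the LAPACK-style swaps move into position j.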
static __device__ void setup_pivinfo_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
    // initialize pivinfo (could be done in a separate kernel using multiple thread blocks)
for (int s =0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS < m) && (tid < MAX_NTHREADS) )
pivinfo[tid + s * MAX_NTHREADS] = tid + s * MAX_NTHREADS + 1;
}
__syncthreads();
if (tid == 0)
{
int i, itsreplacement, mynewrowid;
for (i=0; i < nb; i++) {
mynewrowid = ipiv[i]-1; //-1 to get the index in C
itsreplacement = pivinfo[mynewrowid];
pivinfo[mynewrowid] = pivinfo[i];
pivinfo[i] = itsreplacement;
}
}
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel_batched(magma_int_t **pivinfo_array, magma_int_t **ipiv_array, int m, int nb)
{
int batchid = blockIdx.x;
setup_pivinfo_devfunc(pivinfo_array[batchid], ipiv_array[batchid], m, nb);
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
setup_pivinfo_devfunc(pivinfo, ipiv, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo_batched( magma_int_t **pivinfo_array, magma_int_t **ipiv_array,
magma_int_t m, magma_int_t nb,
magma_int_t batchCount,
magma_queue_t queue)
{
if (nb == 0 ) return;
hipLaunchKernelGGL(( setup_pivinfo_kernel_batched)
, dim3(batchCount), dim3(min(m, MAX_NTHREADS)), 0, queue->cuda_stream() ,
pivinfo_array, ipiv_array, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo( magma_int_t *pivinfo, magma_int_t *ipiv,
magma_int_t m, magma_int_t nb,
magma_queue_t queue)
{
if (nb == 0 ) return;
hipLaunchKernelGGL(( setup_pivinfo_kernel)
, dim3(1), dim3(min(m, MAX_NTHREADS)), 0, queue->cuda_stream() ,
pivinfo, ipiv, m, nb);
}
// =============================================================================
// Auxiliary routine to adjust ipiv
/******************************************************************************/
static __device__ void adjust_ipiv_devfunc(magma_int_t *ipiv, int m, int offset)
{
int tid = threadIdx.x;
if (tid < m)
{
ipiv[tid] += offset;
}
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel_batched(magma_int_t **ipiv_array, int m, int offset)
{
int batchid = blockIdx.x;
adjust_ipiv_devfunc(ipiv_array[batchid], m, offset);
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel(magma_int_t *ipiv, int m, int offset)
{
adjust_ipiv_devfunc(ipiv, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv_batched( magma_int_t **ipiv_array,
magma_int_t m, magma_int_t offset,
magma_int_t batchCount, magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > MAX_NTHREADS)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
hipLaunchKernelGGL(( adjust_ipiv_kernel_batched)
, dim3(batchCount), dim3(m), 0, queue->cuda_stream() ,
ipiv_array, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv( magma_int_t *ipiv,
magma_int_t m, magma_int_t offset,
magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > 1024)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
hipLaunchKernelGGL(( adjust_ipiv_kernel)
, dim3(1), dim3(m), 0, queue->cuda_stream() ,
ipiv, m, offset);
}
|
d33095682738c0418ef94c224126b76355b86857.cu
|
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// =============================================================================
// Auxiliary routine to compute piv final destination for the current step
/******************************************************************************/
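// The device function below applies the swap sequence described by ipiv (swap entry i with
// entry ipiv[i]-1, for i = 0 .. nb-1) to the identity permutation {1, ..., m} stored in
// pivinfo. Worked example (illustrative): m = 3, nb = 2, ipiv = {3, 3} (1-based) gives
// pivinfo = {1, 2, 3} -> {3, 2, 1} after i = 0 -> {3, 1, 2} after i = 1, i.e. entry j then
// names the original row that the LAPACK-style swaps move into position j.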
static __device__ void setup_pivinfo_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
    // initialize pivinfo (could be done in a separate kernel using multiple thread blocks)
for (int s =0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS < m) && (tid < MAX_NTHREADS) )
pivinfo[tid + s * MAX_NTHREADS] = tid + s * MAX_NTHREADS + 1;
}
__syncthreads();
if (tid == 0)
{
int i, itsreplacement, mynewrowid;
for (i=0; i < nb; i++) {
mynewrowid = ipiv[i]-1; //-1 to get the index in C
itsreplacement = pivinfo[mynewrowid];
pivinfo[mynewrowid] = pivinfo[i];
pivinfo[i] = itsreplacement;
}
}
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel_batched(magma_int_t **pivinfo_array, magma_int_t **ipiv_array, int m, int nb)
{
int batchid = blockIdx.x;
setup_pivinfo_devfunc(pivinfo_array[batchid], ipiv_array[batchid], m, nb);
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
setup_pivinfo_devfunc(pivinfo, ipiv, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo_batched( magma_int_t **pivinfo_array, magma_int_t **ipiv_array,
magma_int_t m, magma_int_t nb,
magma_int_t batchCount,
magma_queue_t queue)
{
if (nb == 0 ) return;
setup_pivinfo_kernel_batched
<<< batchCount, min(m, MAX_NTHREADS), 0, queue->cuda_stream() >>>
(pivinfo_array, ipiv_array, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo( magma_int_t *pivinfo, magma_int_t *ipiv,
magma_int_t m, magma_int_t nb,
magma_queue_t queue)
{
if (nb == 0 ) return;
setup_pivinfo_kernel
<<< 1, min(m, MAX_NTHREADS), 0, queue->cuda_stream() >>>
(pivinfo, ipiv, m, nb);
}
// =============================================================================
// Auxiliary routine to adjust ipiv
/******************************************************************************/
static __device__ void adjust_ipiv_devfunc(magma_int_t *ipiv, int m, int offset)
{
int tid = threadIdx.x;
if (tid < m)
{
ipiv[tid] += offset;
}
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel_batched(magma_int_t **ipiv_array, int m, int offset)
{
int batchid = blockIdx.x;
adjust_ipiv_devfunc(ipiv_array[batchid], m, offset);
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel(magma_int_t *ipiv, int m, int offset)
{
adjust_ipiv_devfunc(ipiv, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv_batched( magma_int_t **ipiv_array,
magma_int_t m, magma_int_t offset,
magma_int_t batchCount, magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > MAX_NTHREADS)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
adjust_ipiv_kernel_batched
<<< batchCount, m, 0, queue->cuda_stream() >>>
(ipiv_array, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv( magma_int_t *ipiv,
magma_int_t m, magma_int_t offset,
magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > MAX_NTHREADS)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
adjust_ipiv_kernel
<<< 1, m, 0, queue->cuda_stream() >>>
(ipiv, m, offset);
}
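/******************************************************************************/
// Illustrative sketch only (not part of the original file): one plausible way
// the two batched helpers above could be chained for a panel of width nb that
// starts at global row `offset`. The variable names and the choice of passing
// nb pivot entries to adjust_ipiv_batched are placeholder assumptions, not
// MAGMA's actual getrf driver code.
static inline void
example_apply_panel_pivots( magma_int_t **pivinfo_array, magma_int_t **ipiv_array,
                            magma_int_t m, magma_int_t nb, magma_int_t offset,
                            magma_int_t batchCount, magma_queue_t queue )
{
    // compute final row destinations for the row-interchange (laswp) stage
    setup_pivinfo_batched( pivinfo_array, ipiv_array, m, nb, batchCount, queue );
    // shift the panel-local 1-based pivots so they index global rows
    adjust_ipiv_batched( ipiv_array, nb, offset, batchCount, queue );
}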
|
bd7dbea75c524e7f6c7106a6399bf923f6e118d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define RAYCASTER_ENABLE_DVR
#include "RaycasterKernelParams.h"
#ifdef RAYCASTER_ENABLE_DVR
#include "cudaUtil.h"
#include "RaycasterKernelDefines.h"
#include "RaycasterKernelGlobals.cuh"
#include "RaycasterKernelHelpers.cuh"
template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, eColorMode CM>
__global__ void dvrKernel(
int2 brickMinScreen,
int2 brickSizeScreen,
int2 renderTargetOffset,
float3 boxMin,
float3 boxMax,
float3 world2texOffset,
float3 world2texScale
)
{
const float opacityThreshold = 0.999f;
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= brickSizeScreen.x) || (y >= brickSizeScreen.y)) return;
x += brickMinScreen.x;
y += brickMinScreen.y;
// calculate eye ray in world space
float3 rayPos = getRayPos(c_raycastParams.viewInv);
float3 rayDir = getRayDir(c_raycastParams.viewInv, x, y);
x += renderTargetOffset.x;
y += renderTargetOffset.y;
// find intersection with box
float tnear, tfar;
if (!intersectBox(rayPos, rayDir, boxMin, boxMax, &tnear, &tfar)) return;
tnear = fmaxf(tnear, 0.0f); // clamp to near plane
// current position and step increment in world space
float3 pos = rayPos + rayDir * tnear;
float3 step = rayDir * c_raycastParams.stepSizeWorld;
float depthLinear = -transformPos(c_raycastParams.view, pos).z;
float depthStepLinear = -transformDir(c_raycastParams.view, step).z;
// read depth buffer
float depthMax;
surf2Dread(&depthMax, g_surfDepth, x * sizeof(float), y);
float depthMaxLinear = depthToLinear(depthMax);
// restrict depthMaxLinear to exit point depth, so we can use it as stop criterion
depthMaxLinear = min(depthMaxLinear, -transformPos(c_raycastParams.view, rayPos + rayDir * tfar).z);
if(depthLinear >= depthMaxLinear) return;
// get initial color from render target
uchar4 colorStart;
surf2Dread(&colorStart, g_surfTarget, x * 4, y);
float4 sum = rgbaUCharToFloat(colorStart);
float stepFactor = c_raycastParams.stepSizeWorld * c_raycastParams.density;
// march along ray from front to back, accumulating color
while((depthLinear + depthStepLinear) < depthMaxLinear && sum.w < opacityThreshold)
{
float4 color = getColor<measureSource, F, C, false>(c_raycastParams.measure1, g_texVolume1, w2t(pos), rayDir, c_raycastParams.gridSpacing,
stepFactor, c_raycastParams.transferOffset, c_raycastParams.transferScale, c_raycastParams.tfAlphaScale, c_raycastParams.measureScale1);
sum += (1.0f - sum.w) * color;
pos += step;
depthLinear += depthStepLinear;
}
// do last (partial) step
float lastStepRatio = min((depthMaxLinear - depthLinear) / depthStepLinear, 1.0f);
float4 color = getColor<measureSource, F, C, false>(c_raycastParams.measure1, g_texVolume1, w2t(pos), rayDir, c_raycastParams.gridSpacing,
stepFactor * lastStepRatio, c_raycastParams.transferOffset, c_raycastParams.transferScale, c_raycastParams.tfAlphaScale, c_raycastParams.measureScale1);
sum += (1.0f - sum.w) * color;
// write output color
surf2Dwrite(rgbaFloatToUChar(sum), g_surfTarget, x * 4, y);
}
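// Note: the march above is standard front-to-back alpha compositing --
// each sample contributes (1 - sum.w) * color to all four channels of `sum`,
// the loop terminates early once the accumulated opacity passes
// opacityThreshold, and the final partial step scales the sample's step
// factor by the fraction of a full step left before depthMaxLinear.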
#endif
void raycasterKernelDvr(RaycasterKernelParams& params)
{
#ifdef RAYCASTER_ENABLE_DVR
// color mode isn't used -> directly call RAYCASTER_COMPUTE_SWITCH
switch(params.filterMode) {
#ifdef RAYCASTER_ENABLE_LINEAR
case TEXTURE_FILTER_LINEAR : RAYCASTER_COMPUTE_SWITCH_RT(dvrKernel, TEXTURE_FILTER_LINEAR, COLOR_MODE_UI); break;
#endif
#ifdef RAYCASTER_ENABLE_CUBIC
case TEXTURE_FILTER_CUBIC : RAYCASTER_COMPUTE_SWITCH_RT(dvrKernel, TEXTURE_FILTER_CUBIC, COLOR_MODE_UI); break;
#endif
}
#endif
}
|
bd7dbea75c524e7f6c7106a6399bf923f6e118d4.cu
|
#define RAYCASTER_ENABLE_DVR
#include "RaycasterKernelParams.h"
#ifdef RAYCASTER_ENABLE_DVR
#include "cudaUtil.h"
#include "RaycasterKernelDefines.h"
#include "RaycasterKernelGlobals.cuh"
#include "RaycasterKernelHelpers.cuh"
template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, eColorMode CM>
__global__ void dvrKernel(
int2 brickMinScreen,
int2 brickSizeScreen,
int2 renderTargetOffset,
float3 boxMin,
float3 boxMax,
float3 world2texOffset,
float3 world2texScale
)
{
const float opacityThreshold = 0.999f;
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= brickSizeScreen.x) || (y >= brickSizeScreen.y)) return;
x += brickMinScreen.x;
y += brickMinScreen.y;
// calculate eye ray in world space
float3 rayPos = getRayPos(c_raycastParams.viewInv);
float3 rayDir = getRayDir(c_raycastParams.viewInv, x, y);
x += renderTargetOffset.x;
y += renderTargetOffset.y;
// find intersection with box
float tnear, tfar;
if (!intersectBox(rayPos, rayDir, boxMin, boxMax, &tnear, &tfar)) return;
tnear = fmaxf(tnear, 0.0f); // clamp to near plane
// current position and step increment in world space
float3 pos = rayPos + rayDir * tnear;
float3 step = rayDir * c_raycastParams.stepSizeWorld;
float depthLinear = -transformPos(c_raycastParams.view, pos).z;
float depthStepLinear = -transformDir(c_raycastParams.view, step).z;
// read depth buffer
float depthMax;
surf2Dread(&depthMax, g_surfDepth, x * sizeof(float), y);
float depthMaxLinear = depthToLinear(depthMax);
// restrict depthMaxLinear to exit point depth, so we can use it as stop criterion
depthMaxLinear = min(depthMaxLinear, -transformPos(c_raycastParams.view, rayPos + rayDir * tfar).z);
if(depthLinear >= depthMaxLinear) return;
// get initial color from render target
uchar4 colorStart;
surf2Dread(&colorStart, g_surfTarget, x * 4, y);
float4 sum = rgbaUCharToFloat(colorStart);
float stepFactor = c_raycastParams.stepSizeWorld * c_raycastParams.density;
// march along ray from front to back, accumulating color
while((depthLinear + depthStepLinear) < depthMaxLinear && sum.w < opacityThreshold)
{
float4 color = getColor<measureSource, F, C, false>(c_raycastParams.measure1, g_texVolume1, w2t(pos), rayDir, c_raycastParams.gridSpacing,
stepFactor, c_raycastParams.transferOffset, c_raycastParams.transferScale, c_raycastParams.tfAlphaScale, c_raycastParams.measureScale1);
sum += (1.0f - sum.w) * color;
pos += step;
depthLinear += depthStepLinear;
}
// do last (partial) step
float lastStepRatio = min((depthMaxLinear - depthLinear) / depthStepLinear, 1.0f);
float4 color = getColor<measureSource, F, C, false>(c_raycastParams.measure1, g_texVolume1, w2t(pos), rayDir, c_raycastParams.gridSpacing,
stepFactor * lastStepRatio, c_raycastParams.transferOffset, c_raycastParams.transferScale, c_raycastParams.tfAlphaScale, c_raycastParams.measureScale1);
sum += (1.0f - sum.w) * color;
// write output color
surf2Dwrite(rgbaFloatToUChar(sum), g_surfTarget, x * 4, y);
}
#endif
void raycasterKernelDvr(RaycasterKernelParams& params)
{
#ifdef RAYCASTER_ENABLE_DVR
// color mode isn't used -> directly call RAYCASTER_COMPUTE_SWITCH
switch(params.filterMode) {
#ifdef RAYCASTER_ENABLE_LINEAR
case TEXTURE_FILTER_LINEAR : RAYCASTER_COMPUTE_SWITCH_RT(dvrKernel, TEXTURE_FILTER_LINEAR, COLOR_MODE_UI); break;
#endif
#ifdef RAYCASTER_ENABLE_CUBIC
case TEXTURE_FILTER_CUBIC : RAYCASTER_COMPUTE_SWITCH_RT(dvrKernel, TEXTURE_FILTER_CUBIC, COLOR_MODE_UI); break;
#endif
}
#endif
}
|
f66e946adb7d55814f10b6d33bba1351cc980a66.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Julia.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void julia(float c1, float c2, uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
Julia::Julia(float c1, float c2, uint nMin, uint nMax, const Grid& grid, uint w, uint h, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Julia_Cuda_RGBA_uchar4", domaineMath), variateurAnimation(Interval<float>(nMin, nMax), 1.f)
{
// Inputs
this->c1 = c1;
this->c2 = c2;
// Tools
this->t = nMin; // protected in Animable
}
Julia::~Julia()
{
// nothing
}
/*-------------------------*\
|* Method *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used because this view is not zoomable
*/
void Julia::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
t = variateurAnimation.get();
hipLaunchKernelGGL(( julia) , dim3(dg),dim3(db), 0, 0, c1, c2, ptrDevPixels, w, h, t, domaineMath);
}
/**
* Override
* Called periodically by the API
*/
void Julia::animationStep()
{
t = variateurAnimation.varierAndGet();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
f66e946adb7d55814f10b6d33bba1351cc980a66.cu
|
#include "Julia.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void julia(float c1, float c2, uchar4* ptrDevPixels, uint w, uint h, float t, DomaineMath domaineMath);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
Julia::Julia(float c1, float c2, uint nMin, uint nMax, const Grid& grid, uint w, uint h, const DomaineMath& domaineMath) :
Animable_I<uchar4>(grid, w, h, "Julia_Cuda_RGBA_uchar4", domaineMath), variateurAnimation(Interval<float>(nMin, nMax), 1.f)
{
// Inputs
this->c1 = c1;
this->c2 = c2;
// Tools
this->t = nMin; // protected in Animable
}
Julia::~Julia()
{
// nothing
}
/*-------------------------*\
|* Method *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note: domaineMath is not used because this view is not zoomable
*/
void Julia::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
t = variateurAnimation.get();
julia <<<dg,db>>>(c1, c2, ptrDevPixels, w, h, t, domaineMath);
}
/**
* Override
* Called periodically by the API
*/
void Julia::animationStep()
{
t = variateurAnimation.varierAndGet();
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
3a92a32ac781b3525fd94c6362a9eb4cf29528d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include "hip/hip_runtime.h"
#include "CudaCoreUtils.h"
const dim3 CudaCoreUtils::DEFAULT_GRID_DIM(65535, 65535, 65535);
std::map<unsigned int, const dim3> CudaCoreUtils::gridDimMap = CudaCoreUtils::getGridDimList();
std::string CudaCoreUtils::getClassName(){
return MacroUtils_ClassName(CudaCoreUtils);
}
std::map<unsigned int, const dim3> CudaCoreUtils::getGridDimList(){
int count = 0;
hipDeviceProp_t prop;
std::map<unsigned int, const dim3> map;
hipError_t error = hipGetDeviceCount(&count);
if (error != hipSuccess)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(hipGetDeviceCount), hipGetErrorString(error));
for (unsigned int ii = 0; ii < (unsigned int)count; ++ii){
error = hipGetDeviceProperties(&prop, ii); // capture this call's status so the message below reports it, not the previous call
if (error != hipSuccess)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(hipGetDeviceProperties), hipGetErrorString(error));
else
map.emplace(ii, dim3(prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]));
}
return map;
}
const dim3 CudaCoreUtils::getGridDim(const unsigned int &deviceId){
std::map<unsigned int, const dim3>::iterator iter = gridDimMap.find(deviceId);
if (iter == gridDimMap.end()){
return DEFAULT_GRID_DIM;
}
return iter->second;
}
void CudaCoreUtils::setDevice(int device){
hipError_t error = hipSetDevice(device);
if (hipSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(hipSetDevice), hipGetErrorString(error));
}
void CudaCoreUtils::resetDevice(){
hipError_t error = hipDeviceReset();
if (hipSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(hipDeviceReset), hipGetErrorString(error));
}
int CudaCoreUtils::getDeviceCount(){
int count = 0;
hipError_t error = hipGetDeviceCount(&count);
if (hipSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(hipGetDeviceCount), hipGetErrorString(error));
return count;
}
void CudaCoreUtils::getDeviceProperties(int device, hipDeviceProp_t &prop){
hipError_t error = hipGetDeviceProperties(&prop, device);
if (hipSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(hipGetDeviceProperties), hipGetErrorString(error));
}
void CudaCoreUtils::printDeviceProperties(){
int count = getDeviceCount();
hipDeviceProp_t prop;
for (int ii = 0; ii < count; ii++){
getDeviceProperties(ii, prop);
printf("###############################################\n");
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %d.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %d.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %d.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %d.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %d.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
printf("###############################################\n");
}
}
|
3a92a32ac781b3525fd94c6362a9eb4cf29528d5.cu
|
#include "common.h"
#include "cuda_runtime.h"
#include "CudaCoreUtils.h"
const dim3 CudaCoreUtils::DEFAULT_GRID_DIM(65535, 65535, 65535);
std::map<unsigned int, const dim3> CudaCoreUtils::gridDimMap = CudaCoreUtils::getGridDimList();
std::string CudaCoreUtils::getClassName(){
return MacroUtils_ClassName(CudaCoreUtils);
}
std::map<unsigned int, const dim3> CudaCoreUtils::getGridDimList(){
int count = 0;
cudaDeviceProp prop;
std::map<unsigned int, const dim3> map;
cudaError_t error = cudaGetDeviceCount(&count);
if (error != cudaSuccess)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(cudaGetDeviceCount), cudaGetErrorString(error));
for (unsigned int ii = 0; ii < (unsigned int)count; ++ii){
error = cudaGetDeviceProperties(&prop, ii); // capture this call's status so the message below reports it, not the previous call
if (error != cudaSuccess)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(cudaGetDeviceProperties), cudaGetErrorString(error));
else
map.emplace(ii, dim3(prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]));
}
return map;
}
const dim3 CudaCoreUtils::getGridDim(const unsigned int &deviceId){
std::map<unsigned int, const dim3>::iterator iter = gridDimMap.find(deviceId);
if (iter == gridDimMap.end()){
return DEFAULT_GRID_DIM;
}
return iter->second;
}
void CudaCoreUtils::setDevice(int device){
cudaError_t error = cudaSetDevice(device);
if (cudaSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(cudaSetDevice), cudaGetErrorString(error));
}
void CudaCoreUtils::resetDevice(){
cudaError_t error = cudaDeviceReset();
if (cudaSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(cudaDeviceReset), cudaGetErrorString(error));
}
int CudaCoreUtils::getDeviceCount(){
int count = 0;
cudaError_t error = cudaGetDeviceCount(&count);
if (cudaSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(cudaGetDeviceCount), cudaGetErrorString(error));
return count;
}
void CudaCoreUtils::getDeviceProperties(int device, cudaDeviceProp &prop){
cudaError_t error = cudaGetDeviceProperties(&prop, device);
if (cudaSuccess != error)
throw SystemException(SystemCodeEnum::CUDA_RUNTIME_ERROR,
getClassName(), MacroUtils_CurFunctionName(),
MacroUtils_FunctionName(cudaGetDeviceProperties), cudaGetErrorString(error));
}
void CudaCoreUtils::printDeviceProperties(){
int count = getDeviceCount();
cudaDeviceProp prop;
for (int ii = 0; ii < count; ii++){
getDeviceProperties(ii, prop);
printf("###############################################\n");
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %d.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %d.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %d.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %d.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %d.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
printf("###############################################\n");
}
}
|
68fa6d6d3b3268a63de59a135b36325bfb6af970.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/flash_attn/flash_bwd_launch_template.h>
namespace pytorch_flash{
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 160>(Flash_bwd_params ¶ms, hipStream_t stream, const bool configure) {
run_mha_bwd_hdim160<cutlass::bfloat16_t>(params, stream, configure);
}
} // namespace pytorch_flash
|
68fa6d6d3b3268a63de59a135b36325bfb6af970.cu
|
// Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/flash_attn/flash_bwd_launch_template.h>
namespace pytorch_flash{
template<>
void run_mha_bwd_<cutlass::bfloat16_t, 160>(Flash_bwd_params ¶ms, cudaStream_t stream, const bool configure) {
run_mha_bwd_hdim160<cutlass::bfloat16_t>(params, stream, configure);
}
} // namespace pytorch_flash
|
8827cf6ec27ffac9b231cb03bd298136876f7b11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <sys/time.h>
#define M 8 //
#define K 8
/*
for (int mb = 0; mb < M; mb += Mtile)
for (int nb = 0; nb < N; nb += Ntile)
for (int kb = 0; kb < K; kb += Ktile)
{
// compute Mtile-by-Ntile-by-Ktile matrix product
for (int k = 0; k < Ktile; ++k)
for (int i = 0; i < Mtile; ++i)
for (int j = 0; j < Ntile; ++j)
{
int row = mb + i;
int col = nb + j;
C[row][col] +=
A[row][kb + k] * B[kb + k][col];
}
}
*/
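// The kernel below is a simplified variant of the blocking sketched above:
// each thread produces an M x M block of C (Mtile = Ntile = M per thread),
// while the inner k loop runs over the whole shared dimension N instead of
// Ktile-sized chunks.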
__global__ void matmul(float *A, float *B, float *C, int N) {
int ii = (threadIdx.x + blockDim.x * blockIdx.x) * M; // each thread owns an M x M tile of C
int jj = (threadIdx.y + blockDim.y * blockIdx.y) * M;
for (int i=ii; i<ii+M; i++) {
for (int j=jj; j<jj+M; j++) {
float sum = 0.0f;
for (int k=0; k<N; k++) {
sum += A[N*i+k] * B[N*k+j];
}
C[N*i+j] = sum;
}
}
}
int main(int argc, char **argv) {
int N = atoi(argv[1]);
// Allocate memory space for matrices to cpu (host)
float * h_A = new float [N*N]; // First matrix
float * h_B = new float [N*N]; // Second matrix
float * h_C = new float [N*N]; // Result matrix
// Allocate memory space for matrices to gpu (device)
float *d_A, *d_B, *d_C; // Gpu allocations
int size = N * N * sizeof(float); // Byte size for cuda malloc
hipMalloc((void **) &d_A, size);
hipMalloc((void **) &d_B, size);
hipMalloc((void **) &d_C, size);
// Init cpu matrices with random values.
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
h_A[N*i+j] = drand48();
h_B[N*i+j] = drand48();
h_C[N*i+j] = 0;
}
}
// Copy matrices to gpu memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);
// Make gpu multiplication
struct timeval tic, toc;
gettimeofday(&tic, NULL); //Start timer
// Invoke gpu multiplication
dim3 grid(N/M/K, N/M/K); // Amount of 2 dimensional blocks per axis
dim3 block(K,K); // Thread size per axis
hipLaunchKernelGGL(( matmul), dim3(grid),dim3(block), 0, 0, d_A, d_B, d_C, N);
hipDeviceSynchronize();
// Calculate and print flops
gettimeofday(&toc, NULL); //End timer
double time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); // Print flops for gpu multiplication
// Copy matrices back to cpu memory
hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Verify the GPU result by trivially recalculating the product on the CPU and subtracting it from the copied-back output matrix (and time it)
gettimeofday(&tic, NULL); //Start timer
#pragma omp parallel for
for (int i=0; i<N; i++) {
for (int k=0; k<N; k++) {
for (int j=0; j<N; j++) {
h_C[N*i+j] -= h_A[N*i+k] * h_B[N*k+j];
}
}
}
gettimeofday(&toc, NULL); //End timer
time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); // Print flops for cpu multiplication
// Total error: Sum difference on each value between cpu and gpu calculation
float err = 0;
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
err += fabs(h_C[N*i+j]);
}
}
printf("error: %f\n",err/N/N); // Print total error
// Clear memory
delete[] h_A;
delete[] h_B;
delete[] h_C;
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
|
8827cf6ec27ffac9b231cb03bd298136876f7b11.cu
|
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <sys/time.h>
#define M 8 //
#define K 8
/*
for (int mb = 0; mb < M; mb += Mtile)
for (int nb = 0; nb < N; nb += Ntile)
for (int kb = 0; kb < K; kb += Ktile)
{
// compute Mtile-by-Ntile-by-Ktile matrix product
for (int k = 0; k < Ktile; ++k)
for (int i = 0; i < Mtile; ++i)
for (int j = 0; j < Ntile; ++j)
{
int row = mb + i;
int col = nb + j;
C[row][col] +=
A[row][kb + k] * B[kb + k][col];
}
}
*/
__global__ void matmul(float *A, float *B, float *C, int N) {
int ii = (threadIdx.x + blockDim.x * blockIdx.x) * M; // each thread owns an M x M tile of C
int jj = (threadIdx.y + blockDim.y * blockIdx.y) * M;
for (int i=ii; i<ii+M; i++) {
for (int j=jj; j<jj+M; j++) {
float sum = 0.0f;
for (int k=0; k<N; k++) {
sum += A[N*i+k] * B[N*k+j];
}
C[N*i+j] = sum;
}
}
}
int main(int argc, char **argv) {
int N = atoi(argv[1]);
// Allocate memory space for matrices to cpu (host)
float * h_A = new float [N*N]; // First matrix
float * h_B = new float [N*N]; // Second matrix
float * h_C = new float [N*N]; // Result matrix
// Allocate memory space for matrices to gpu (device)
float *d_A, *d_B, *d_C; // Gpu allocations
int size = N * N * sizeof(float); // Byte size for cuda malloc
cudaMalloc((void **) &d_A, size);
cudaMalloc((void **) &d_B, size);
cudaMalloc((void **) &d_C, size);
// Init cpu matrices with random values.
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
h_A[N*i+j] = drand48();
h_B[N*i+j] = drand48();
h_C[N*i+j] = 0;
}
}
// Copy matrices to gpu memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
// Make gpu multiplication
struct timeval tic, toc;
gettimeofday(&tic, NULL); //Start timer
// Invoke gpu multiplication
dim3 grid(N/M/K, N/M/K); // Amount of 2 dimensional blocks per axis
dim3 block(K,K); // Thread size per axis
matmul<<<grid,block>>>(d_A, d_B, d_C, N);
cudaDeviceSynchronize();
// Calculate and print flops
gettimeofday(&toc, NULL); //End timer
double time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); // Print flops for gpu multiplication
// Copy matrices back to cpu memory
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Verify the GPU result by trivially recalculating the product on the CPU and subtracting it from the copied-back output matrix (and time it)
gettimeofday(&tic, NULL); //Start timer
#pragma omp parallel for
for (int i=0; i<N; i++) {
for (int k=0; k<N; k++) {
for (int j=0; j<N; j++) {
h_C[N*i+j] -= h_A[N*i+k] * h_B[N*k+j];
}
}
}
gettimeofday(&toc, NULL); //End timer
time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); // Print flops for cpu multiplication
// Total error: Sum difference on each value between cpu and gpu calculation
float err = 0;
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
err += fabs(h_C[N*i+j]);
}
}
printf("error: %f\n",err/N/N); // Print total error
// Clear memory
delete[] h_A;
delete[] h_B;
delete[] h_C;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
03c2027852fff645c890fe01b59ebed9fdc329dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <unistd.h>
#include "hip/hip_runtime.h"
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err){
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(-1);
}
}
__global__ void myFirstKernel(int *d_a )
{
int i= blockIdx.x;
int j=threadIdx.x;
d_a[i * blockDim.x + j] += 1000 * i + j;
}
int main(int argc, char** argv)
{
int *h_a;
int *d_a;
int numBlocks = 256;
int numThreadsPerBlock = 256;
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int)* 64;
int device;
hipGetDevice(&device);
printf("enter cuda program\n");
printf("device: %d\n", device);
checkCUDAError("hipSetDevice");
h_a = (int *)malloc(memSize);
hipMalloc((void**)&d_a, memSize);
checkCUDAError("hipMalloc");
hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy");
sleep(1);
hipLaunchKernelGGL(( myFirstKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_a);
checkCUDAError("kernel execution");
sleep(1);
hipMemcpy(h_a, d_a, memSize, hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy");
hipFree(d_a);
free(h_a);
return 0;
}
|
03c2027852fff645c890fe01b59ebed9fdc329dd.cu
|
#include <stdio.h>
#include <unistd.h>
#include "cuda.h"
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err){
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(-1);
}
}
__global__ void myFirstKernel(int *d_a )
{
int i= blockIdx.x;
int j=threadIdx.x;
d_a[i * blockDim.x + j] += 1000 * i + j;
}
int main(int argc, char** argv)
{
int *h_a;
int *d_a;
int numBlocks = 256;
int numThreadsPerBlock = 256;
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int)* 64;
int device;
cudaGetDevice(&device);
printf("enter cuda program\n");
printf("device: %d\n", device);
checkCUDAError("cudaSetDevice");
h_a = (int *)malloc(memSize);
cudaMalloc((void**)&d_a, memSize);
checkCUDAError("cudaMalloc");
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy");
sleep(1);
myFirstKernel<<<numBlocks, numThreadsPerBlock>>>(d_a);
checkCUDAError("kernel execution");
sleep(1);
cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy");
cudaFree(d_a);
free(h_a);
return 0;
}
|
7ba90cd7ff593b76e0c7c8371e4b746a6724f890.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/dynamic_smem.hpp"
namespace cv { namespace gpu { namespace device
{
namespace hough
{
__device__ static int g_counter;
////////////////////////////////////////////////////////////////////////
// linesAccum
__global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
const int n = blockIdx.x;
const float ang = n * theta;
float sinVal;
float cosVal;
sincosf(ang, &sinVal, &cosVal);
sinVal *= irho;
cosVal *= irho;
const int shift = (numrho - 1) / 2;
int* accumRow = accum.ptr(n + 1);
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int val = list[i];
const int x = (val & 0xFFFF);
const int y = (val >> 16) & 0xFFFF;
int r = __float2int_rn(x * cosVal + y * sinVal);
r += shift;
::atomicAdd(accumRow + r + 1, 1);
}
}
__global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
int* smem = DynamicSharedMem<int>();
for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
smem[i] = 0;
__syncthreads();
const int n = blockIdx.x;
const float ang = n * theta;
float sinVal;
float cosVal;
sincosf(ang, &sinVal, &cosVal);
sinVal *= irho;
cosVal *= irho;
const int shift = (numrho - 1) / 2;
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int val = list[i];
const int x = (val & 0xFFFF);
const int y = (val >> 16) & 0xFFFF;
int r = __float2int_rn(x * cosVal + y * sinVal);
r += shift;
Emulation::smem::atomicAdd(&smem[r + 1], 1);
}
__syncthreads();
int* accumRow = accum.ptr(n + 1);
for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
accumRow[i] = smem[i];
}
void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
{
const dim3 block(has20 ? 1024 : 512);
const dim3 grid(accum.rows - 2);
size_t smemSize = (accum.cols - 1) * sizeof(int);
if (smemSize < sharedMemPerBlock - 1000)
hipLaunchKernelGGL(( linesAccumShared), dim3(grid), dim3(block), smemSize, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2);
else
hipLaunchKernelGGL(( linesAccumGlobal), dim3(grid), dim3(block), 0, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
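// Note: linesAccumShared builds each block's per-angle rho histogram
// ((accum.cols - 1) ints) in shared memory with block-local atomics and
// writes it to the global accumulator once per row, while linesAccumGlobal
// falls back to global atomicAdd; the shared variant is selected whenever
// the histogram fits with roughly 1 KB of shared memory to spare.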
////////////////////////////////////////////////////////////////////////
// linesGetResult
__global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho)
{
const int r = blockIdx.x * blockDim.x + threadIdx.x;
const int n = blockIdx.y * blockDim.y + threadIdx.y;
if (r >= accum.cols - 2 || n >= accum.rows - 2)
return;
const int curVotes = accum(n + 1, r + 1);
if (curVotes > threshold &&
curVotes > accum(n + 1, r) &&
curVotes >= accum(n + 1, r + 2) &&
curVotes > accum(n, r + 1) &&
curVotes >= accum(n + 2, r + 1))
{
const float radius = (r - (numrho - 1) * 0.5f) * rho;
const float angle = n * theta;
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float2(radius, angle);
votes[ind] = curVotes;
}
}
}
int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort)
{
void* counterPtr;
cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));
cudaSafeCall( hipFuncSetCacheConfig(linesGetResult, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( linesGetResult), dim3(grid), dim3(block), 0, 0, accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
int totalCount;
cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
if (doSort && totalCount > 0)
{
thrust::device_ptr<float2> outPtr(out);
thrust::device_ptr<int> votesPtr(votes);
thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>());
}
return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
|
7ba90cd7ff593b76e0c7c8371e4b746a6724f890.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/emulation.hpp"
#include "opencv2/gpu/device/dynamic_smem.hpp"
namespace cv { namespace gpu { namespace device
{
namespace hough
{
__device__ static int g_counter;
////////////////////////////////////////////////////////////////////////
// linesAccum
__global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
const int n = blockIdx.x;
const float ang = n * theta;
float sinVal;
float cosVal;
sincosf(ang, &sinVal, &cosVal);
sinVal *= irho;
cosVal *= irho;
const int shift = (numrho - 1) / 2;
int* accumRow = accum.ptr(n + 1);
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int val = list[i];
const int x = (val & 0xFFFF);
const int y = (val >> 16) & 0xFFFF;
int r = __float2int_rn(x * cosVal + y * sinVal);
r += shift;
::atomicAdd(accumRow + r + 1, 1);
}
}
__global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
{
int* smem = DynamicSharedMem<int>();
for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
smem[i] = 0;
__syncthreads();
const int n = blockIdx.x;
const float ang = n * theta;
float sinVal;
float cosVal;
sincosf(ang, &sinVal, &cosVal);
sinVal *= irho;
cosVal *= irho;
const int shift = (numrho - 1) / 2;
for (int i = threadIdx.x; i < count; i += blockDim.x)
{
const unsigned int val = list[i];
const int x = (val & 0xFFFF);
const int y = (val >> 16) & 0xFFFF;
int r = __float2int_rn(x * cosVal + y * sinVal);
r += shift;
Emulation::smem::atomicAdd(&smem[r + 1], 1);
}
__syncthreads();
int* accumRow = accum.ptr(n + 1);
for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
accumRow[i] = smem[i];
}
void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
{
const dim3 block(has20 ? 1024 : 512);
const dim3 grid(accum.rows - 2);
size_t smemSize = (accum.cols - 1) * sizeof(int);
if (smemSize < sharedMemPerBlock - 1000)
linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
else
linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// linesGetResult
__global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho)
{
const int r = blockIdx.x * blockDim.x + threadIdx.x;
const int n = blockIdx.y * blockDim.y + threadIdx.y;
if (r >= accum.cols - 2 || n >= accum.rows - 2)
return;
const int curVotes = accum(n + 1, r + 1);
if (curVotes > threshold &&
curVotes > accum(n + 1, r) &&
curVotes >= accum(n + 1, r + 2) &&
curVotes > accum(n, r + 1) &&
curVotes >= accum(n + 2, r + 1))
{
const float radius = (r - (numrho - 1) * 0.5f) * rho;
const float angle = n * theta;
const int ind = ::atomicAdd(&g_counter, 1);
if (ind < maxSize)
{
out[ind] = make_float2(radius, angle);
votes[ind] = curVotes;
}
}
}
int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort)
{
void* counterPtr;
cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
const dim3 block(32, 8);
const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(linesGetResult, cudaFuncCachePreferL1) );
linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
int totalCount;
cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
totalCount = ::min(totalCount, maxSize);
if (doSort && totalCount > 0)
{
thrust::device_ptr<float2> outPtr(out);
thrust::device_ptr<int> votesPtr(votes);
thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>());
}
return totalCount;
}
}
}}}
#endif /* CUDA_DISABLER */
|
2e69b7ab8468acdb282f009d0fa34acba78bd863.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
2e69b7ab8468acdb282f009d0fa34acba78bd863.cu
|
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"
__global__ void
bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum,
int in,
int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
__global__ void bpnn_adjust_weights_cuda(float * delta,
int hid,
float * ly,
int in,
float * w,
float * oldw)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
//eta = 0.3;
//momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by ==0){
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
34461a5a2054b065f9fbc03b286cc8509b0878f6.hip
|
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////
//Ho Thien Luan -> History Tracking!
// 1. Ver_0: Approximate string matching with k-mismatches
// 2. Ver_1: Optimize by using sharing_memory for storing pattern
//
//
//
////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "cuPrintf.hip"
#include <time.h>
#define FILENAME_MAXLEN 256
#define THREAD_BLOCK_EXP (7)
#define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP)
__global__ void ASM_kernel(char *g_input_string, int input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result)
{
int tid = threadIdx.x ;
int gbid = blockIdx.y * gridDim.x + blockIdx.x ;
int start = gbid*THREAD_BLOCK_SIZE;
int start_tid = start + tid;
int pow_2b = 1 << b;
unsigned long long int bit_vector=0;
int t_shift;
//__shared__ char sub_string_shared [THREAD_BLOCK_SIZE + pattern_length - 1] ;
// __shared__ char sub_string_shared [256] ;
// int pow_2b = 1 << b;
// unsigned long long int bit_vector = 0;
// sub_string_shared[tid] = g_input_string[start+tid];
// if ( tid < (pattern_length - 1) ){
// sub_string_shared[THREAD_BLOCK_SIZE + tid] = g_input_string[start+THREAD_BLOCK_SIZE+tid];
// }
// __syncthreads();
////////////////////////////////////////////////////////////////
if (start_tid < input_size-real_pattern_size+1) {
for (int i = 0; i < real_pattern_size; i++) {
t_shift = i%real_pattern_size;
if (g_input_string[ start_tid+i ] == 'A') {
bit_vector = bit_vector + (((g_pattern_decode[0] << t_shift*b) & mask) | (g_pattern_decode[0] >> (real_pattern_size - t_shift)*b));
}
else if (g_input_string[ start_tid+i ] == 'C'){
bit_vector = bit_vector + (((g_pattern_decode[1] << t_shift*b) & mask) | (g_pattern_decode[1] >> (real_pattern_size - t_shift)*b));
}
else if (g_input_string[ start_tid+i ] == 'T'){
bit_vector = bit_vector + (((g_pattern_decode[2] << t_shift*b) & mask) | (g_pattern_decode[2] >> (real_pattern_size - t_shift)*b));
}
else if (g_input_string[ start_tid+i ] == 'G'){ //case of G
bit_vector = bit_vector + (((g_pattern_decode[3] << t_shift*b) & mask) | (g_pattern_decode[3] >> (real_pattern_size - t_shift)*b));
}
else { // can be char "\n"
bit_vector = bit_vector + maskplus;
}
}
//Get results
for (int j = 0; j < real_pattern_size ; j++) { //circular patterns
//h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1);
g_matched_result[start_tid*real_pattern_size+j] = bit_vector % pow_2b;
bit_vector = bit_vector >> b;
}
//cuPrintf("threadIdx.x = %d \t ,start = %d, matrix_M = %d, matrix_B = %d, string = %s, pattern = %s, pattern_size = %d\n", tid, start_tid, g_matrix_M[(real_pattern_size+1)*(start_tid+i) + i],g_matrix_B[(real_pattern_size+1)*(start_tid+i) + i], g_input_string[i-1], g_pattern_circular[i-1], real_pattern_size);
}
}
////////////////////////////////
void ASM_process_top (char *g_input_string, size_t input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result)
{
// num_blocks = # of thread blocks to cover input stream
int num_blocks = (input_size-real_pattern_size+1)/THREAD_BLOCK_SIZE + 1 ;
dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ;
dim3 dimGrid ;
int p = num_blocks >> 15 ;
dimGrid.x = num_blocks ;
if ( p ){
dimGrid.x = 1<<15 ;
dimGrid.y = p+1 ;
}
cudaPrintfInit();////for cuPrintf
hipLaunchKernelGGL(( ASM_kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result);
cudaPrintfDisplay();////for cuPrintf
cudaPrintfEnd(); ////for cuPrintf
}
////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
char inputFile[FILENAME_MAXLEN];
char patternFile[FILENAME_MAXLEN];
strcpy( inputFile, argv[2]) ;
strcpy( patternFile, argv[1]) ;
int k_par;
k_par = strtol(argv[3], NULL, 10);
////////////////////////////////////////////////////////////////////////////////////
//Process input patterns
int input_size;
int pattern_size;
int real_pattern_size;
char *h_input_string = NULL ;
char *h_pattern = NULL ;
int *h_matched_result = NULL ;
int *h_pattern_decode = (int*) malloc( sizeof(int)*4 ) ;
// step 1: read patterns and dump transition table
// int deviceID = 0 ;
// hipDeviceProp_t deviceProp;
// hipGetDeviceProperties(&deviceProp, deviceID);
//readPatternFromFile( patternFile) ;
//step 2: prepare input stream
FILE* fpin = fopen( inputFile, "rb");
assert ( NULL != fpin ) ;
// obtain file size
fseek (fpin , 0 , SEEK_END);
input_size = ftell (fpin);
rewind (fpin);
//step2: prepare input pattern
FILE* fpattern = fopen( patternFile, "rb");
assert ( NULL != fpattern ) ;
// obtain file size
fseek (fpattern , 0 , SEEK_END);
pattern_size = ftell (fpattern);
rewind (fpattern);
// allocate memory to contain the whole file
h_input_string = (char *) malloc (sizeof(char)*input_size);
assert( NULL != h_input_string );
h_pattern = (char *) malloc (sizeof(char)*pattern_size);
assert( NULL != h_pattern );
real_pattern_size = pattern_size-1;
h_matched_result = (int *) malloc (sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size);
assert( NULL != h_matched_result );
memset( h_matched_result, 0, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size) ;
// copy the file into the buffer
input_size = fread (h_input_string, 1, input_size, fpin);
fclose(fpin);
pattern_size = fread (h_pattern, 1, pattern_size, fpattern);
fclose(fpattern);
//printf("Cir string = %s, length = %d\n", h_pattern, real_pattern_size);
//ACSM Preprocess - Define table T[]
unsigned long long int T_A = 0;
int cal_A = 0;
unsigned long long int T_C = 0;
int cal_C = 0;
unsigned long long int T_T = 0;
int cal_T = 0;
unsigned long long int T_G = 0;
int cal_G = 0;
float sub_real_pattern_size = real_pattern_size+1;
float float_b = log2(sub_real_pattern_size);
int b = float_b;
if(b != float_b) {
b=b+1;
}
//int b = log2(sub_real_pattern_size)+1;
//int pow_2b = 1 << b;
//printf("#-pattern = %d, b=%d, float_b = %f, opw_2b = %d-#\n",real_pattern_size,b,float_b,pow_2b);
//for ( int i = real_pattern_size-1; i>=0; i--) {
for ( int i = 0; h_pattern[ i ]; i++) {
if(h_pattern[ i ] != '\n') {
//printf("Process for char: %c\n", h_pattern[ i ]);
if (h_pattern[ i ] == 'A') {
cal_A = 0;
cal_C = 1;
cal_T = 1;
cal_G = 1;
}
else if (h_pattern[ i ] == 'C'){
cal_A = 1;
cal_C = 0;
cal_T = 1;
cal_G = 1;
}
else if (h_pattern[ i ] == 'T'){
cal_A = 1;
cal_C = 1;
cal_T = 0;
cal_G = 1;
}
else if (h_pattern[ i ] == 'G'){
cal_A = 1;
cal_C = 1;
cal_T = 1;
cal_G = 0;
}
T_A = (T_A << b) + cal_A;
T_C = (T_C << b) + cal_C;
T_T = (T_T << b) + cal_T;
T_G = (T_G << b) + cal_G;
}
}
h_pattern_decode[0] = T_A;
h_pattern_decode[1] = T_C;
h_pattern_decode[2] = T_T;
h_pattern_decode[3] = T_G;
//printf("\nT_A: %d\n", T_A);
//printf("\nT_C: %d\n", T_C);
//printf("\nT_T: %d\n", T_T);
//printf("\nT_G: %d\n", T_G);
//shift-add bit-vector.
//unsigned long long int bit_vector=0;
//int t_shift = 0;
#define BIT(x) (1<<(x))
unsigned long long int mask = 0;
for (int i = 0; i < real_pattern_size*b ; i++) {
mask = (mask << 1) | 1;
}
unsigned long long int maskplus = 0;
for (int i = 0; i < real_pattern_size ; i++) {
maskplus = (maskplus << b) | 1;
}
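// Worked example: with real_pattern_size = 3 the code above gives b = 2,
// mask = 0b111111 (real_pattern_size*b low bits set) and
// maskplus = 0b010101 (a 1 in the lowest bit of each b-bit counter field),
// so each b-bit field of the shift-add bit_vector counts mismatches for one
// circular rotation of the pattern.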
/*
//ACSM process
struct timespec t_start, t_end;
double elapsedTime;
clock_gettime (CLOCK_REALTIME, &t_start);
//for (int i = 0; h_input_string [ i ] ; i++) {
for (int i = 0; i<input_size-(real_pattern_size-1) ; i++) {
bit_vector = 0 ;
for(int k=0; k< real_pattern_size; k++) {
t_shift = k%real_pattern_size;
//printf("Process for char: %c, T_A = %u, t_shift = %d\n", h_input_string[ i ], T_A, t_shift);
if (h_input_string[ i+k ] == 'A') {
bit_vector = bit_vector + ((T_A >> t_shift*b) & mask);
}
else if (h_input_string[ i+k ] == 'C'){
bit_vector = bit_vector + ((T_C >> t_shift*b) & mask);
}
else if (h_input_string[ i+k ] == 'T'){
bit_vector = bit_vector + ((T_T >> t_shift*b) & mask);
}
else if (h_input_string[ i+k ] == 'G'){ //case of G
bit_vector = bit_vector + ((T_G >> t_shift*b) & mask);
}
else { // can be char "\n"
bit_vector = bit_vector + maskplus;
}
//Get results
// for (int j = no_of_patterns-1; j >= 0; j--) {
// h_matched_result[i*no_of_patterns+j] = vector % pow_2b;
// vector = vector >> b;
// }
//printf("bit_vector: %u\n", bit_vector);
}
for (int j = 0; j < real_pattern_size ; j++) { //circular patterns
//h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1);
h_matched_result[i*real_pattern_size+j] = bit_vector % pow_2b;
bit_vector = bit_vector >> b;
}
}// for h_input_string
clock_gettime(CLOCK_REALTIME, &t_end);
elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000);
*/
//Process in GPU
char *g_input_string;
int *g_matched_result;
int *g_pattern_decode;
hipMalloc (&g_input_string, sizeof(char)*input_size);
hipMalloc (&g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size);
hipMalloc (&g_pattern_decode, sizeof(int)*4);
hipMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, hipMemcpyHostToDevice );
hipMemcpy (g_pattern_decode, h_pattern_decode, sizeof(int)*4, hipMemcpyHostToDevice);
// record time setting
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// step 3: run ASM on GPU
ASM_process_top ( g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result) ;
// record time setting
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipMemcpy (h_matched_result, g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size, hipMemcpyDeviceToHost );
// step 4: output matched result
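// h_matched_result[i*real_pattern_size + j] is the number of mismatches (Hamming distance)
// between the text window starting at i and one rotation of the circular pattern;
// every entry that is <= k_par is counted below as an approximate circular match.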
int total_result = 0;
for (int i = 0; i < input_size-(real_pattern_size-1); i++) {
for (int j = 0; j < real_pattern_size; j++) {
//printf("At position %4d, circular pattern %4d : match pattern %d\n", i, j, h_matched_result[i*real_pattern_size + j]);
if(h_matched_result[i*real_pattern_size + j] <= k_par) {total_result++;}
}
}
printf("\n\n");
printf("############################################################\n");
printf("#--Approximate Circular String Matching with k-Mismatches--#\n");
printf("#----------------------------------------------------------#\n");
printf("#---------------Modified PCVM Alg. in GPU------------------#\n");
printf("############################################################\n");
printf("#--Pattern Length |\t\t %10d \t #\n",real_pattern_size);
printf("#----------------------------------------------------------#\n");
printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size );
printf("#----------------------------------------------------------#\n");
printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result);
printf("#----------------------------------------------------------#\n");
//printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime);
printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time);
printf("#----------------------------------------------------------#\n");
//printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) );
//printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000) );
printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) );
printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000) );
printf("############################################################\n");
free(h_input_string);
free(h_pattern);
free(h_matched_result);
free(h_pattern_decode);
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(g_input_string);
hipFree(g_pattern_decode);
hipFree(g_matched_result);
return 0;
}
|
34461a5a2054b065f9fbc03b286cc8509b0878f6.cu
|
////////////////////////////////////////////////////////////
//Ho Thien Luan -> History tracking
// 1. Ver_0: Approximate string matching with k-mismatches
// 2. Ver_1: Optimized by using shared memory to store the pattern
//
//
//
////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cuPrintf.cu"
#include <time.h>
#define FILENAME_MAXLEN 256
#define THREAD_BLOCK_EXP (7)
#define THREAD_BLOCK_SIZE (1 << THREAD_BLOCK_EXP)
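/* ASM_kernel: one thread per text window.
 * A thread with global index t (t < input_size - real_pattern_size + 1) scans its window of
 * real_pattern_size characters, adds the circularly rotated mismatch table of each character
 * into a packed bit-vector (maskplus for non-ACGT characters such as '\n'), and then unpacks
 * the real_pattern_size per-rotation mismatch counts into
 * g_matched_result[t*real_pattern_size .. t*real_pattern_size + real_pattern_size - 1]. */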
__global__ void ASM_kernel(char *g_input_string, int input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result)
{
int tid = threadIdx.x ;
int gbid = blockIdx.y * gridDim.x + blockIdx.x ;
int start = gbid*THREAD_BLOCK_SIZE;
int start_tid = start + tid;
int pow_2b = 1 << b;
unsigned long long int bit_vector=0;
int t_shift;
//__shared__ char sub_string_shared [THREAD_BLOCK_SIZE + pattern_length - 1] ;
// __shared__ char sub_string_shared [256] ;
// int pow_2b = 1 << b;
// unsigned long long int bit_vector = 0;
// sub_string_shared[tid] = g_input_string[start+tid];
// if ( tid < (pattern_length - 1) ){
// sub_string_shared[THREAD_BLOCK_SIZE + tid] = g_input_string[start+THREAD_BLOCK_SIZE+tid];
// }
// __syncthreads();
////////////////////////////////////////////////////////////////
if (start_tid < input_size-real_pattern_size+1) {
for (int i = 0; i < real_pattern_size; i++) {
t_shift = i%real_pattern_size;
if (g_input_string[ start_tid+i ] == 'A') {
bit_vector = bit_vector + (((g_pattern_decode[0] << t_shift*b) & mask) | (g_pattern_decode[0] >> (real_pattern_size - t_shift)*b));
}
else if (g_input_string[ start_tid+i ] == 'C'){
bit_vector = bit_vector + (((g_pattern_decode[1] << t_shift*b) & mask) | (g_pattern_decode[1] >> (real_pattern_size - t_shift)*b));
}
else if (g_input_string[ start_tid+i ] == 'T'){
bit_vector = bit_vector + (((g_pattern_decode[2] << t_shift*b) & mask) | (g_pattern_decode[2] >> (real_pattern_size - t_shift)*b));
}
else if (g_input_string[ start_tid+i ] == 'G'){ //case of G
bit_vector = bit_vector + (((g_pattern_decode[3] << t_shift*b) & mask) | (g_pattern_decode[3] >> (real_pattern_size - t_shift)*b));
}
else { // can be char "\n"
bit_vector = bit_vector + maskplus;
}
}
//Get results
for (int j = 0; j < real_pattern_size ; j++) { //circular patterns
//h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1);
g_matched_result[start_tid*real_pattern_size+j] = bit_vector % pow_2b;
bit_vector = bit_vector >> b;
}
//cuPrintf("threadIdx.x = %d \t ,start = %d, matrix_M = %d, matrix_B = %d, string = %s, pattern = %s, pattern_size = %d\n", tid, start_tid, g_matrix_M[(real_pattern_size+1)*(start_tid+i) + i],g_matrix_B[(real_pattern_size+1)*(start_tid+i) + i], g_input_string[i-1], g_pattern_circular[i-1], real_pattern_size);
}
}
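/* Illustrative trace (hypothetical pattern "ACT", m = 3, b = 2, tables as built on the host):
 * for a text window "CTA" each of the three rotated contributions equals 01 00 01, so the
 * accumulated bit_vector is 11 00 11 and the unpacked counters are 3, 0 and 3.
 * The zero belongs to the rotation "CTA", which matches the window exactly, while the other
 * two rotations ("ACT" and "TAC") differ in all three positions. */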
////////////////////////////////
void ASM_process_top (char *g_input_string, size_t input_size, int *g_pattern_decode, int real_pattern_size, int mask, int maskplus, int b, int *g_matched_result)
{
// num_blocks = # of thread blocks to cover input stream
int num_blocks = (input_size-real_pattern_size+1)/THREAD_BLOCK_SIZE + 1 ;
dim3 dimBlock( THREAD_BLOCK_SIZE, 1 ) ;
dim3 dimGrid ;
int p = num_blocks >> 15 ;
dimGrid.x = num_blocks ;
if ( p ){
dimGrid.x = 1<<15 ;
dimGrid.y = p+1 ;
}
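// Sizing sketch (illustrative numbers): for an ~8 MiB input and an 8-character pattern,
// num_blocks = 65536, so p = 2 and the grid becomes 32768 x 3 blocks of 128 threads;
// the surplus threads are discarded by the start_tid bound check inside the kernel.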
cudaPrintfInit();////for cuPrintf
ASM_kernel <<< dimGrid, dimBlock >>>(g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result);
cudaPrintfDisplay();////for cuPrintf
cudaPrintfEnd(); ////for cuPrintf
}
////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
char inputFile[FILENAME_MAXLEN];
char patternFile[FILENAME_MAXLEN];
strcpy( inputFile, argv[2]) ;
strcpy( patternFile, argv[1]) ;
int k_par;
k_par = strtol(argv[3], NULL, 10);
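// Expected invocation (the program name here is only illustrative):
//   ./acsm_gpu <pattern file> <text file> <k>
// i.e. argv[1] is the circular pattern, argv[2] is the text, argv[3] is the mismatch bound k;
// argc is not validated, so missing arguments will crash the strcpy/strtol calls above.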
////////////////////////////////////////////////////////////////////////////////////
//Process input patterns
int input_size;
int pattern_size;
int real_pattern_size;
char *h_input_string = NULL ;
char *h_pattern = NULL ;
int *h_matched_result = NULL ;
int *h_pattern_decode = (int*) malloc( sizeof(int)*4 ) ;
// step 1: read patterns and dump transition table
// int deviceID = 0 ;
// cudaDeviceProp deviceProp;
// cudaGetDeviceProperties(&deviceProp, deviceID);
//readPatternFromFile( patternFile) ;
//step 2: prepare input stream
FILE* fpin = fopen( inputFile, "rb");
assert ( NULL != fpin ) ;
// obtain file size
fseek (fpin , 0 , SEEK_END);
input_size = ftell (fpin);
rewind (fpin);
//step 2: prepare input pattern
FILE* fpattern = fopen( patternFile, "rb");
assert ( NULL != fpattern ) ;
// obtain file size
fseek (fpattern , 0 , SEEK_END);
pattern_size = ftell (fpattern);
rewind (fpattern);
// allocate memory to contain the whole file
h_input_string = (char *) malloc (sizeof(char)*input_size);
assert( NULL != h_input_string );
h_pattern = (char *) malloc (sizeof(char)*pattern_size);
assert( NULL != h_pattern );
real_pattern_size = pattern_size-1;
h_matched_result = (int *) malloc (sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size);
assert( NULL != h_matched_result );
memset( h_matched_result, 0, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size) ;
// copy the file into the buffer
input_size = fread (h_input_string, 1, input_size, fpin);
fclose(fpin);
pattern_size = fread (h_pattern, 1, pattern_size, fpattern);
fclose(fpattern);
//printf("Cir string = %s, length = %d\n", h_pattern, real_pattern_size);
//ACSM Preprocess - Define table T[]
unsigned long long int T_A = 0;
int cal_A = 0;
unsigned long long int T_C = 0;
int cal_C = 0;
unsigned long long int T_T = 0;
int cal_T = 0;
unsigned long long int T_G = 0;
int cal_G = 0;
float sub_real_pattern_size = real_pattern_size+1;
float float_b = log2(sub_real_pattern_size);
int b = float_b;
if(b != float_b) {
b=b+1;
}
//int b = log2(sub_real_pattern_size)+1;
//int pow_2b = 1 << b;
//printf("#-pattern = %d, b=%d, float_b = %f, opw_2b = %d-#\n",real_pattern_size,b,float_b,pow_2b);
//for ( int i = real_pattern_size-1; i>=0; i--) {
for ( int i = 0; h_pattern[ i ]; i++) {
if(h_pattern[ i ] != '\n') {
//printf("Process for char: %c\n", h_pattern[ i ]);
if (h_pattern[ i ] == 'A') {
cal_A = 0;
cal_C = 1;
cal_T = 1;
cal_G = 1;
}
else if (h_pattern[ i ] == 'C'){
cal_A = 1;
cal_C = 0;
cal_T = 1;
cal_G = 1;
}
else if (h_pattern[ i ] == 'T'){
cal_A = 1;
cal_C = 1;
cal_T = 0;
cal_G = 1;
}
else if (h_pattern[ i ] == 'G'){
cal_A = 1;
cal_C = 1;
cal_T = 1;
cal_G = 0;
}
T_A = (T_A << b) + cal_A;
T_C = (T_C << b) + cal_C;
T_T = (T_T << b) + cal_T;
T_G = (T_G << b) + cal_G;
}
}
h_pattern_decode[0] = T_A;
h_pattern_decode[1] = T_C;
h_pattern_decode[2] = T_T;
h_pattern_decode[3] = T_G;
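// Each table packs one b-bit mismatch flag per pattern position (first pattern character in
// the most significant field). Storing them as 32-bit ints assumes real_pattern_size*b fits
// in 31 bits, i.e. short patterns.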
//printf("\nT_A: %d\n", T_A);
//printf("\nT_C: %d\n", T_C);
//printf("\nT_T: %d\n", T_T);
//printf("\nT_G: %d\n", T_G);
//shift-add bit-vector.
//unsigned long long int bit_vector=0;
//int t_shift = 0;
#define BIT(x) (1<<(x))
unsigned long long int mask = 0;
// mask: the low real_pattern_size*b bits set, used to keep rotated tables inside the bit-vector
for (int i = 0; i < real_pattern_size*b ; i++) {
mask = (mask << 1) | 1;
}
unsigned long long int maskplus = 0;
// maskplus: a 1 in the lowest bit of every b-bit counter, i.e. one mismatch charged to every rotation
for (int i = 0; i < real_pattern_size ; i++) {
maskplus = (maskplus << b) | 1;
}
/*
//ACSM process
struct timespec t_start, t_end;
double elapsedTime;
clock_gettime (CLOCK_REALTIME, &t_start);
//for (int i = 0; h_input_string [ i ] ; i++) {
for (int i = 0; i<input_size-(real_pattern_size-1) ; i++) {
bit_vector = 0 ;
for(int k=0; k< real_pattern_size; k++) {
t_shift = k%real_pattern_size;
//printf("Process for char: %c, T_A = %u, t_shift = %d\n", h_input_string[ i ], T_A, t_shift);
if (h_input_string[ i+k ] == 'A') {
bit_vector = bit_vector + ((T_A >> t_shift*b) & mask);
}
else if (h_input_string[ i+k ] == 'C'){
bit_vector = bit_vector + ((T_C >> t_shift*b) & mask);
}
else if (h_input_string[ i+k ] == 'T'){
bit_vector = bit_vector + ((T_T >> t_shift*b) & mask);
}
else if (h_input_string[ i+k ] == 'G'){ //case of G
bit_vector = bit_vector + ((T_G >> t_shift*b) & mask);
}
else { // can be char "\n"
bit_vector = bit_vector + maskplus;
}
//Get results
// for (int j = no_of_patterns-1; j >= 0; j--) {
// h_matched_result[i*no_of_patterns+j] = vector % pow_2b;
// vector = vector >> b;
// }
//printf("bit_vector: %u\n", bit_vector);
}
for (int j = 0; j < real_pattern_size ; j++) { //circular patterns
//h_matched_result[(i-real_pattern_size+1)*real_pattern_size+(real_pattern_size-1-j)] += ((bit_vector >> (k*real_pattern_size+j)) & 1);
h_matched_result[i*real_pattern_size+j] = bit_vector % pow_2b;
bit_vector = bit_vector >> b;
}
}// for h_input_string
clock_gettime(CLOCK_REALTIME, &t_end);
elapsedTime = (t_end.tv_sec*1000+t_end.tv_nsec/1000000)-(t_start.tv_sec*1000+t_start.tv_nsec/1000000);
*/
//Process in GPU
char *g_input_string;
int *g_matched_result;
int *g_pattern_decode;
cudaMalloc (&g_input_string, sizeof(char)*input_size);
cudaMalloc (&g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size);
cudaMalloc (&g_pattern_decode, sizeof(int)*4);
cudaMemcpy (g_input_string, h_input_string, sizeof(char)*input_size, cudaMemcpyHostToDevice );
cudaMemcpy (g_pattern_decode, h_pattern_decode, sizeof(int)*4, cudaMemcpyHostToDevice);
// record time setting
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// step 3: run ASM on GPU
ASM_process_top ( g_input_string, input_size, g_pattern_decode, real_pattern_size, mask, maskplus, b, g_matched_result) ;
// record time setting
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaMemcpy (h_matched_result, g_matched_result, sizeof(int)*(input_size-real_pattern_size+1)*real_pattern_size, cudaMemcpyDeviceToHost );
// step 4: output matched result
int total_result = 0;
for (int i = 0; i < input_size-(real_pattern_size-1); i++) {
for (int j = 0; j < real_pattern_size; j++) {
//printf("At position %4d, circular pattern %4d : match pattern %d\n", i, j, h_matched_result[i*real_pattern_size + j]);
if(h_matched_result[i*real_pattern_size + j] <= k_par) {total_result++;}
}
}
printf("\n\n");
printf("############################################################\n");
printf("#--Approximate Circular String Matching with k-Mismatches--#\n");
printf("#----------------------------------------------------------#\n");
printf("#---------------Modified PCVM Alg. in GPU------------------#\n");
printf("############################################################\n");
printf("#--Pattern Length |\t\t %10d \t #\n",real_pattern_size);
printf("#----------------------------------------------------------#\n");
printf("#--Input Size (bytes) |\t\t %10d \t #\n", input_size );
printf("#----------------------------------------------------------#\n");
printf("#--Total matched with k = %d |\t\t %10d \t #\n", k_par, total_result);
printf("#----------------------------------------------------------#\n");
//printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", elapsedTime);
printf("#--Total elapsed time (ms) |\t\t %10f \t #\n", time);
printf("#----------------------------------------------------------#\n");
//printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000000) );
//printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(elapsedTime*1000) );
printf("#--Throughput Result (Gbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000000) );
printf("#--Throughput Result (Mbps) |\t\t %10f \t #\n", (float)(input_size*8)/(time*1000) );
printf("############################################################\n");
free(h_input_string);
free(h_pattern);
free(h_matched_result);
free(h_pattern_decode);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(g_input_string);
cudaFree(g_pattern_decode);
cudaFree(g_matched_result);
return 0;
}
|